From 3af6864abe2a2aafb188b77458314446d8505aa1 Mon Sep 17 00:00:00 2001 From: Andrew Pilloud Date: Mon, 3 Oct 2022 17:45:24 -0700 Subject: [PATCH 001/115] Publish Python nexmark metrics to influxdb --- .../benchmarks/nexmark/nexmark_launcher.py | 85 ++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py b/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py index e4babe5f42e8..04c6d325e194 100644 --- a/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py +++ b/sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py @@ -60,10 +60,15 @@ # pytype: skip-file import argparse +import json import logging +import os import time import uuid +import requests +from requests.auth import HTTPBasicAuth + import apache_beam as beam from apache_beam.options.pipeline_options import GoogleCloudOptions from apache_beam.options.pipeline_options import PipelineOptions @@ -123,6 +128,13 @@ def __init__(self): logging.info('creating sub %s', self.topic_name) sub.create() + self.export_influxdb = self.args.export_summary_to_influxdb + if self.export_influxdb: + self.influx_database = self.args.influx_database + self.influx_host = self.args.influx_host + self.influx_base = self.args.base_influx_measurement + self.influx_retention = self.args.influx_retention_policy + def parse_args(self): parser = argparse.ArgumentParser() @@ -170,6 +182,32 @@ def parse_args(self): choices=['PUBLISH_ONLY', 'SUBSCRIBE_ONLY', 'COMBINED'], help='Pubsub mode used in the pipeline.') + parser.add_argument( + '--export_summary_to_influxdb', + default=False, + action='store_true', + help='If set store results in influxdb') + parser.add_argument( + '--influx_database', + type=str, + default='beam_test_metrics', + help='Influx database name') + parser.add_argument( + '--influx_host', + type=str, + default='http://localhost:8086', + help='Influx database url') + parser.add_argument( + '--base_influx_measurement', + type=str, + default='nexmark', + help='Prefix to influx measurement') + parser.add_argument( + '--influx_retention_policy', + type=str, + default='forever', + help='Retention policy for stored results') + self.args, self.pipeline_args = parser.parse_known_args() logging.basicConfig( level=getattr(logging, self.args.loglevel, None), @@ -243,7 +281,8 @@ def read_from_pubsub(self): | 'deserialization' >> beam.ParDo(nexmark_util.ParseJsonEventFn())) return events - def run_query(self, query, query_args, pipeline_options, query_errors): + def run_query( + self, query_num, query, query_args, pipeline_options, query_errors): try: self.pipeline = beam.Pipeline(options=self.pipeline_options) nexmark_util.setup_coder() @@ -269,6 +308,8 @@ def run_query(self, query, query_args, pipeline_options, query_errors): result.wait_until_finish() perf = self.monitor(result, event_monitor, result_monitor) self.log_performance(perf) + if self.export_influxdb: + self.publish_performance_influxdb(query_num, perf) except Exception as exc: query_errors.append(str(exc)) @@ -349,6 +390,47 @@ def log_performance(perf): 'query run took %.1f seconds and processed %.1f events per second' % (perf.runtime_sec, perf.event_per_sec)) + def publish_performance_influxdb(self, query_num, perf): + processingMode = "streaming" if self.streaming else "batch" + measurement = "%s_%d_python_%s" % ( + self.influx_base, query_num, processingMode) + + tags = {'runner': self.pipeline_options.view_as(StandardOptions).runner} + + mt = 
','.join([measurement] + [k + "=" + v for k, v in tags.items()]) + + fields = { + 'numResults': "%di" % (perf.result_count), + 'runtimeMs': "%di" % (perf.runtime_sec * 1000), + } + + ts = int(time.time()) + payload = '\n'.join( + ["%s %s=%s %d" % (mt, k, v, ts) for k, v in fields.items()]) + + url = '%s/write' % (self.influx_host) + query_str = { + 'db': self.influx_database, + 'rp': self.influx_retention, + 'precision': 's', + } + + user = os.getenv('INFLUXDB_USER') + password = os.getenv('INFLUXDB_USER_PASSWORD') + auth = HTTPBasicAuth(user, password) + + try: + response = requests.post(url, params=query_str, data=payload, auth=auth) + except requests.exceptions.RequestException as e: + logging.warning('Failed to publish metrics to InfluxDB: ' + str(e)) + else: + if response.status_code != 204: + content = json.loads(response.content) + logging.warning( + 'Failed to publish metrics to InfluxDB. Received status code %s ' + 'with an error message: %s' % + (response.status_code, content['error'])) + @staticmethod def get_performance(result, event_monitor, result_monitor): event_count = nexmark_util.get_counter_metric( @@ -429,6 +511,7 @@ def run(self): for i in self.args.query: logging.info('Running query %d', i) self.run_query( + i, queries[i], query_args, self.pipeline_options, From 688054fbd700611818a5f13eea1cfca6f5fbb7d8 Mon Sep 17 00:00:00 2001 From: Andrew Pilloud Date: Fri, 14 Oct 2022 16:15:24 -0700 Subject: [PATCH 002/115] Exclude nexmark from codecov, it has no tests --- .github/codecov.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/codecov.yml b/.github/codecov.yml index 0eaf91cdbdd6..c1c5dfb17bb4 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -64,6 +64,7 @@ ignore: - "**/*_test_py3*.py" - "**/*_microbenchmark.py" - "sdks/go/pkg/beam/register/register.go" + - "sdks/python/apache_beam/testing/benchmarks/nexmark/**" # See https://docs.codecov.com/docs/flags for options. flag_management: From 8aa3e054ec57ffed9a0c999624b50bd66f435e4c Mon Sep 17 00:00:00 2001 From: bulat safiullin Date: Thu, 4 Aug 2022 20:16:07 +0600 Subject: [PATCH 003/115] [Website] update calendar section mobile classes #22694 --- website/www/site/assets/scss/_calendar.scss | 6 ++++++ website/www/site/assets/scss/_local.scss | 16 ++++++++++++++++ website/www/site/layouts/index.html | 6 +++--- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/website/www/site/assets/scss/_calendar.scss b/website/www/site/assets/scss/_calendar.scss index 8e27fccbd6ea..68d08872c961 100644 --- a/website/www/site/assets/scss/_calendar.scss +++ b/website/www/site/assets/scss/_calendar.scss @@ -94,6 +94,9 @@ padding: 24px 19.2px 24.7px 20px; margin-bottom: 24px; } + @media (max-width: $ak-breakpoint-xs) { + width: 260px; + } &:hover { text-decoration: none; box-shadow: 0 4px 20px 0 rgba(0, 0, 0, 0.24), @@ -251,6 +254,9 @@ max-width: 327px; height: 356px; padding: 32px 20px; + @media (max-width: $ak-breakpoint-xs) { + max-width: 260px; + } .calendar-card-big-title { margin-top: 35px; diff --git a/website/www/site/assets/scss/_local.scss b/website/www/site/assets/scss/_local.scss index a297bc15bde8..f638f64a1185 100644 --- a/website/www/site/assets/scss/_local.scss +++ b/website/www/site/assets/scss/_local.scss @@ -15,8 +15,24 @@ * limitations under the License. 
*/ +@import "media"; + .paragraph-wrap { a { word-break: break-word; } } + +.calendar-mobile--twitter { + @media (max-width: $ak-breakpoint-xs) { + iframe { + width: 260px !important; + } + } +} + +.calendar-mobile--events { + @media (max-width: $ak-breakpoint-xs) { + overflow-x: auto; + } +} diff --git a/website/www/site/layouts/index.html b/website/www/site/layouts/index.html index 396b0dbac839..6ada1652bdfd 100644 --- a/website/www/site/layouts/index.html +++ b/website/www/site/layouts/index.html @@ -104,7 +104,7 @@

[Hunk bodies lost in extraction: the two index.html hunks (at lines 104 and 148, headers `@@ -104,7 +104,7 @@` and `@@ -148,8 +148,8 @@`) add the "calendar-mobile--events" class to the calendar events container and the "calendar-mobile--twitter" class to the Twitter feed container, matching the new SCSS above.]
From 35419247faf6eb098cf7778d36eeeaab5773bc04 Mon Sep 17 00:00:00 2001 From: Kenneth Knowles Date: Wed, 19 Oct 2022 15:30:12 -0700 Subject: [PATCH 004/115] Verify that secondary key coder is deterministic in SortValues --- .../beam/sdk/extensions/sorter/SortValues.java | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/sdks/java/extensions/sorter/src/main/java/org/apache/beam/sdk/extensions/sorter/SortValues.java b/sdks/java/extensions/sorter/src/main/java/org/apache/beam/sdk/extensions/sorter/SortValues.java index 5c489af6e6b4..bc9fb2f89554 100644 --- a/sdks/java/extensions/sorter/src/main/java/org/apache/beam/sdk/extensions/sorter/SortValues.java +++ b/sdks/java/extensions/sorter/src/main/java/org/apache/beam/sdk/extensions/sorter/SortValues.java @@ -76,13 +76,20 @@ SortValues create( @Override public PCollection>>> expand( PCollection>>> input) { + + Coder secondaryKeyCoder = getSecondaryKeyCoder(input.getCoder()); + try { + secondaryKeyCoder.verifyDeterministic(); + } catch (Coder.NonDeterministicException e) { + throw new IllegalStateException( + "the secondary key coder of SortValues must be deterministic", e); + } + return input .apply( ParDo.of( new SortValuesDoFn<>( - sorterOptions, - getSecondaryKeyCoder(input.getCoder()), - getValueCoder(input.getCoder())))) + sorterOptions, secondaryKeyCoder, getValueCoder(input.getCoder())))) .setCoder(input.getCoder()); } From 3e8bc10c7678e99ef480398cd939f3cb48e0c631 Mon Sep 17 00:00:00 2001 From: riteshghorse Date: Thu, 20 Oct 2022 13:42:52 -0400 Subject: [PATCH 005/115] fix lints --- sdks/go/pkg/beam/core/runtime/xlangx/expansionx/process.go | 2 +- sdks/go/pkg/beam/transforms/xlang/python/external.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sdks/go/pkg/beam/core/runtime/xlangx/expansionx/process.go b/sdks/go/pkg/beam/core/runtime/xlangx/expansionx/process.go index 28dc3294f44f..590c9392a991 100644 --- a/sdks/go/pkg/beam/core/runtime/xlangx/expansionx/process.go +++ b/sdks/go/pkg/beam/core/runtime/xlangx/expansionx/process.go @@ -58,7 +58,7 @@ func NewExpansionServiceRunner(jarPath, servicePort string) (*ExpansionServiceRu return &ExpansionServiceRunner{execPath: jarPath, servicePort: servicePort, serviceCommand: serviceCommand}, nil } -// NewExpansionServiceRunner builds an ExpansionServiceRunner struct for a given python module and +// NewPyExpansionServiceRunner builds an ExpansionServiceRunner struct for a given python module and // Beam version and returns a pointer to it. Passing an empty string as servicePort will request an // open port to be assigned to the service. func NewPyExpansionServiceRunner(pythonExec, module, servicePort string) (*ExpansionServiceRunner, error) { diff --git a/sdks/go/pkg/beam/transforms/xlang/python/external.go b/sdks/go/pkg/beam/transforms/xlang/python/external.go index 629ede0f9527..3fd6edd37e32 100644 --- a/sdks/go/pkg/beam/transforms/xlang/python/external.go +++ b/sdks/go/pkg/beam/transforms/xlang/python/external.go @@ -27,7 +27,8 @@ import ( ) const ( - pythonCallableUrn = "beam:logical_type:python_callable:v1" + pythonCallableUrn = "beam:logical_type:python_callable:v1" + // ExpansionServiceModule is the module containing the python expansion service for python external transforms. 
ExpansionServiceModule = "apache_beam.runners.portability.expansion_service_main" ) From a138a4f86f6d70dadb9eaefd16ca92d608ba5c5d Mon Sep 17 00:00:00 2001 From: Anand Inguva <34158215+AnandInguva@users.noreply.github.com> Date: Thu, 20 Oct 2022 18:53:10 -0400 Subject: [PATCH 006/115] Update google cloud vision >= 2.0.0 (#23755) Co-authored-by: Anand Inguva --- sdks/python/apache_beam/ml/gcp/visionml.py | 51 +++++++++++-------- .../apache_beam/ml/gcp/visionml_test.py | 9 ++-- .../apache_beam/ml/gcp/visionml_test_it.py | 7 ++- .../py310/base_image_requirements.txt | 6 +-- .../py37/base_image_requirements.txt | 4 +- .../py38/base_image_requirements.txt | 6 +-- .../py39/base_image_requirements.txt | 6 +-- sdks/python/setup.py | 2 +- 8 files changed, 51 insertions(+), 40 deletions(-) diff --git a/sdks/python/apache_beam/ml/gcp/visionml.py b/sdks/python/apache_beam/ml/gcp/visionml.py index 3e556b903c44..dd29dd377388 100644 --- a/sdks/python/apache_beam/ml/gcp/visionml.py +++ b/sdks/python/apache_beam/ml/gcp/visionml.py @@ -80,7 +80,7 @@ def __init__( metadata=None): """ Args: - features: (List[``vision.types.Feature.enums.Feature``]) Required. + features: (List[``vision.Feature``]) Required. The Vision API features to detect retry: (google.api_core.retry.Retry) Optional. A retry object used to retry requests. @@ -107,9 +107,9 @@ def __init__( image_contexts = [(''gs://cloud-samples-data/vision/ocr/sign.jpg'', Union[dict, - ``vision.types.ImageContext()``]), + ``vision.ImageContext()``]), (''gs://cloud-samples-data/vision/ocr/sign.jpg'', Union[dict, - ``vision.types.ImageContext()``]),] + ``vision.ImageContext()``]),] context_side_input = ( @@ -152,9 +152,8 @@ def expand(self, pvalue): client_options=self.client_options, metadata=self.metadata))) - @typehints.with_input_types( - Union[str, bytes], Optional[vision.types.ImageContext]) - @typehints.with_output_types(List[vision.types.AnnotateImageRequest]) + @typehints.with_input_types(Union[str, bytes], Optional[vision.ImageContext]) + @typehints.with_output_types(List[vision.AnnotateImageRequest]) def _create_image_annotation_pairs(self, element, context_side_input): if context_side_input: # If we have a side input image context, use that image_context = context_side_input.get(element) @@ -162,13 +161,18 @@ def _create_image_annotation_pairs(self, element, context_side_input): image_context = None if isinstance(element, str): - image = vision.types.Image( - source=vision.types.ImageSource(image_uri=element)) + + image = vision.Image( + {'source': vision.ImageSource({'image_uri': element})}) + else: # Typehint checks only allows str or bytes - image = vision.types.Image(content=element) + image = vision.Image(content=element) - request = vision.types.AnnotateImageRequest( - image=image, features=self.features, image_context=image_context) + request = vision.AnnotateImageRequest({ + 'image': image, + 'features': self.features, + 'image_context': image_context + }) yield request @@ -181,7 +185,7 @@ class AnnotateImageWithContext(AnnotateImage): Element is a tuple of:: (Union[str, bytes], - Optional[``vision.types.ImageContext``]) + Optional[``vision.ImageContext``]) where the former is either an URI (e.g. a GCS URI) or bytes base64-encoded image data. @@ -197,7 +201,7 @@ def __init__( metadata=None): """ Args: - features: (List[``vision.types.Feature.enums.Feature``]) Required. + features: (List[``vision.Feature``]) Required. The Vision API features to detect retry: (google.api_core.retry.Retry) Optional. A retry object used to retry requests. 
@@ -244,25 +248,28 @@ def expand(self, pvalue): metadata=self.metadata))) @typehints.with_input_types( - Tuple[Union[str, bytes], Optional[vision.types.ImageContext]]) - @typehints.with_output_types(List[vision.types.AnnotateImageRequest]) + Tuple[Union[str, bytes], Optional[vision.ImageContext]]) + @typehints.with_output_types(List[vision.AnnotateImageRequest]) def _create_image_annotation_pairs(self, element, **kwargs): element, image_context = element # Unpack (image, image_context) tuple if isinstance(element, str): - image = vision.types.Image( - source=vision.types.ImageSource(image_uri=element)) + image = vision.Image( + {'source': vision.ImageSource({'image_uri': element})}) else: # Typehint checks only allows str or bytes - image = vision.types.Image(content=element) + image = vision.Image({"content": element}) - request = vision.types.AnnotateImageRequest( - image=image, features=self.features, image_context=image_context) + request = vision.AnnotateImageRequest({ + 'image': image, + 'features': self.features, + 'image_context': image_context + }) yield request -@typehints.with_input_types(List[vision.types.AnnotateImageRequest]) +@typehints.with_input_types(List[vision.AnnotateImageRequest]) class _ImageAnnotateFn(DoFn): """A DoFn that sends each input element to the GCP Vision API. - Returns ``google.cloud.vision.types.BatchAnnotateImagesResponse``. + Returns ``google.cloud.vision.BatchAnnotateImagesResponse``. """ def __init__(self, features, retry, timeout, client_options, metadata): super().__init__() diff --git a/sdks/python/apache_beam/ml/gcp/visionml_test.py b/sdks/python/apache_beam/ml/gcp/visionml_test.py index f038442468f8..479b3d80e4de 100644 --- a/sdks/python/apache_beam/ml/gcp/visionml_test.py +++ b/sdks/python/apache_beam/ml/gcp/visionml_test.py @@ -45,12 +45,13 @@ def setUp(self): self._mock_client = mock.Mock() self._mock_client.batch_annotate_images.return_value = None - feature_type = vision.enums.Feature.Type.TEXT_DETECTION + feature_type = vision.Feature.Type.TEXT_DETECTION self.features = [ - vision.types.Feature( - type=feature_type, max_results=3, model="builtin/stable") + vision.Feature({ + 'type': feature_type, 'max_results': 3, 'model': "builtin/stable" + }) ] - self.img_ctx = vision.types.ImageContext() + self.img_ctx = vision.ImageContext() self.min_batch_size = 1 self.max_batch_size = 1 diff --git a/sdks/python/apache_beam/ml/gcp/visionml_test_it.py b/sdks/python/apache_beam/ml/gcp/visionml_test_it.py index 4413266dcc5c..ea3fc9768ff5 100644 --- a/sdks/python/apache_beam/ml/gcp/visionml_test_it.py +++ b/sdks/python/apache_beam/ml/gcp/visionml_test_it.py @@ -47,7 +47,8 @@ def test_text_detection_with_language_hint(self): IMAGES_TO_ANNOTATE = [ 'gs://apache-beam-samples/advanced_analytics/vision/sign.jpg' ] - IMAGE_CONTEXT = [vision.types.ImageContext(language_hints=['en'])] + + IMAGE_CONTEXT = [vision.ImageContext({'language_hints': ['en']})] with TestPipeline(is_integration_test=True) as p: contexts = p | 'Create context' >> beam.Create( @@ -57,7 +58,9 @@ def test_text_detection_with_language_hint(self): p | beam.Create(IMAGES_TO_ANNOTATE) | AnnotateImage( - features=[vision.types.Feature(type='TEXT_DETECTION')], + features=[ + vision.Feature({'type_': vision.Feature.Type.TEXT_DETECTION}) + ], context_side_input=beam.pvalue.AsDict(contexts)) | beam.ParDo(extract)) diff --git a/sdks/python/container/py310/base_image_requirements.txt b/sdks/python/container/py310/base_image_requirements.txt index aeb8d0d990ba..651a332f909f 100644 --- 
a/sdks/python/container/py310/base_image_requirements.txt +++ b/sdks/python/container/py310/base_image_requirements.txt @@ -66,7 +66,7 @@ google-cloud-pubsublite==1.5.0 google-cloud-recommendations-ai==0.7.1 google-cloud-spanner==3.22.2 google-cloud-videointelligence==1.16.3 -google-cloud-vision==1.0.2 +google-cloud-vision==3.1.4 google-crc32c==1.5.0 google-pasta==0.2.0 google-resumable-media==2.4.0 @@ -102,7 +102,7 @@ overrides==6.5.0 packaging==21.3 pandas==1.4.4 parameterized==0.8.1 -pbr==5.10.0 +pbr==5.11.0 pluggy==1.0.0 proto-plus==1.22.1 protobuf==3.19.6 @@ -131,7 +131,7 @@ requests-mock==1.10.0 requests-oauthlib==1.3.1 rsa==4.9 scikit-learn==1.1.2 -scipy==1.9.2 +scipy==1.9.3 six==1.16.0 sortedcontainers==2.4.0 soupsieve==2.3.2.post1 diff --git a/sdks/python/container/py37/base_image_requirements.txt b/sdks/python/container/py37/base_image_requirements.txt index 53041ca821f0..2dbf7d1827db 100644 --- a/sdks/python/container/py37/base_image_requirements.txt +++ b/sdks/python/container/py37/base_image_requirements.txt @@ -70,7 +70,7 @@ google-cloud-recommendations-ai==0.7.1 google-cloud-spanner==3.22.2 google-cloud-storage==2.5.0 google-cloud-videointelligence==1.16.3 -google-cloud-vision==1.0.2 +google-cloud-vision==3.1.4 google-crc32c==1.5.0 google-pasta==0.2.0 google-python-cloud-debugger==3.1 @@ -109,7 +109,7 @@ overrides==6.5.0 packaging==21.3 pandas==1.3.5 parameterized==0.8.1 -pbr==5.10.0 +pbr==5.11.0 pluggy==1.0.0 proto-plus==1.22.1 protobuf==3.19.6 diff --git a/sdks/python/container/py38/base_image_requirements.txt b/sdks/python/container/py38/base_image_requirements.txt index e5f7fec58485..ca45ef30f856 100644 --- a/sdks/python/container/py38/base_image_requirements.txt +++ b/sdks/python/container/py38/base_image_requirements.txt @@ -70,7 +70,7 @@ google-cloud-recommendations-ai==0.7.1 google-cloud-spanner==3.22.2 google-cloud-storage==2.5.0 google-cloud-videointelligence==1.16.3 -google-cloud-vision==1.0.2 +google-cloud-vision==3.1.4 google-crc32c==1.5.0 google-pasta==0.2.0 google-python-cloud-debugger==3.1 @@ -109,7 +109,7 @@ overrides==6.5.0 packaging==21.3 pandas==1.4.4 parameterized==0.8.1 -pbr==5.10.0 +pbr==5.11.0 pluggy==1.0.0 proto-plus==1.22.1 protobuf==3.19.6 @@ -138,7 +138,7 @@ requests-mock==1.10.0 requests-oauthlib==1.3.1 rsa==4.9 scikit-learn==1.1.2 -scipy==1.9.2 +scipy==1.9.3 six==1.16.0 sortedcontainers==2.4.0 soupsieve==2.3.2.post1 diff --git a/sdks/python/container/py39/base_image_requirements.txt b/sdks/python/container/py39/base_image_requirements.txt index bc624e7df12c..f4b9cdca16a5 100644 --- a/sdks/python/container/py39/base_image_requirements.txt +++ b/sdks/python/container/py39/base_image_requirements.txt @@ -70,7 +70,7 @@ google-cloud-recommendations-ai==0.7.1 google-cloud-spanner==3.22.2 google-cloud-storage==2.5.0 google-cloud-videointelligence==1.16.3 -google-cloud-vision==1.0.2 +google-cloud-vision==3.1.4 google-crc32c==1.5.0 google-pasta==0.2.0 google-python-cloud-debugger==3.1 @@ -109,7 +109,7 @@ overrides==6.5.0 packaging==21.3 pandas==1.4.4 parameterized==0.8.1 -pbr==5.10.0 +pbr==5.11.0 pluggy==1.0.0 proto-plus==1.22.1 protobuf==3.19.6 @@ -138,7 +138,7 @@ requests-mock==1.10.0 requests-oauthlib==1.3.1 rsa==4.9 scikit-learn==1.1.2 -scipy==1.9.2 +scipy==1.9.3 six==1.16.0 sortedcontainers==2.4.0 soupsieve==2.3.2.post1 diff --git a/sdks/python/setup.py b/sdks/python/setup.py index c91fb2e71a85..aaa761b14763 100644 --- a/sdks/python/setup.py +++ b/sdks/python/setup.py @@ -314,7 +314,7 @@ def get_portability_package_data(): 
'google-cloud-dlp>=3.0.0,<4', 'google-cloud-language>=1.3.0,<2', 'google-cloud-videointelligence>=1.8.0,<2', - 'google-cloud-vision>=0.38.0,<2', + 'google-cloud-vision>=2,<4', 'google-cloud-recommendations-ai>=0.1.0,<0.8.0' ], 'interactive': [ From 2e49c7efa4e8783f9a84093b3cb7296ab17f32c1 Mon Sep 17 00:00:00 2001 From: Lukasz Cwik Date: Thu, 20 Oct 2022 18:21:59 -0700 Subject: [PATCH 007/115] Update GcsIO initialization to support converting input parameters to PipelineOptions for authentication (#23766) fixes #23764 --- sdks/python/apache_beam/internal/gcp/auth.py | 6 ++++++ sdks/python/apache_beam/io/gcp/gcsio.py | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/sdks/python/apache_beam/internal/gcp/auth.py b/sdks/python/apache_beam/internal/gcp/auth.py index 699ec79d4b8a..47c3416babd4 100644 --- a/sdks/python/apache_beam/internal/gcp/auth.py +++ b/sdks/python/apache_beam/internal/gcp/auth.py @@ -22,8 +22,10 @@ import logging import socket import threading +from typing import Optional from apache_beam.options.pipeline_options import GoogleCloudOptions +from apache_beam.options.pipeline_options import PipelineOptions # google.auth is only available when Beam is installed with the gcp extra. try: @@ -63,6 +65,8 @@ def set_running_in_gce(worker_executing_project): def get_service_credentials(pipeline_options): + # type: (PipelineOptions) -> Optional[google.auth.credentials.Credentials] + """For internal use only; no backwards-compatibility guarantees. Get credentials to access Google services. @@ -115,6 +119,7 @@ class _Credentials(object): @classmethod def get_service_credentials(cls, pipeline_options): + # type: (PipelineOptions) -> Optional[google.auth.credentials.Credentials] with cls._credentials_lock: if cls._credentials_init: return cls._credentials @@ -134,6 +139,7 @@ def get_service_credentials(cls, pipeline_options): @staticmethod def _get_service_credentials(pipeline_options): + # type: (PipelineOptions) -> Optional[google.auth.credentials.Credentials] if not _GOOGLE_AUTH_AVAILABLE: _LOGGER.warning( 'Unable to find default credentials because the google-auth library ' diff --git a/sdks/python/apache_beam/io/gcp/gcsio.py b/sdks/python/apache_beam/io/gcp/gcsio.py index d4ceeda4bd9d..e34a0b774535 100644 --- a/sdks/python/apache_beam/io/gcp/gcsio.py +++ b/sdks/python/apache_beam/io/gcp/gcsio.py @@ -38,6 +38,8 @@ import time import traceback from itertools import islice +from typing import Optional +from typing import Union import apache_beam from apache_beam.internal.http_client import get_new_http @@ -49,6 +51,7 @@ from apache_beam.io.filesystemio import UploaderStream from apache_beam.io.gcp import resource_identifiers from apache_beam.metrics import monitoring_infos +from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.utils import retry __all__ = ['GcsIO'] @@ -158,7 +161,12 @@ class GcsIOError(IOError, retry.PermanentException): class GcsIO(object): """Google Cloud Storage I/O client.""" def __init__(self, storage_client=None, pipeline_options=None): + # type: (Optional[storage.StorageV1], Optional[Union[dict, PipelineOptions]]) -> None if storage_client is None: + if not pipeline_options: + pipeline_options = PipelineOptions() + elif isinstance(pipeline_options, dict): + pipeline_options = PipelineOptions.from_dictionary(pipeline_options) storage_client = storage.StorageV1( credentials=auth.get_service_credentials(pipeline_options), get_credentials=False, From 69fe1cc86f0355e06db344a37dc4c69748ae61ca Mon Sep 17 00:00:00 2001 From: Chamikara 
Jayalath Date: Thu, 20 Oct 2022 20:47:02 -0700 Subject: [PATCH 008/115] Adds instructions for running the Multi-language Java quickstart from released Beam (#23721) * Adds instructions for running the Multi-language Java quickstart from released Beam * Fix dependencies * Addressing reviewer comments --- .../PythonDataframeWordCount.java | 0 examples/multi-language/README.md | 6 ++ examples/multi-language/build.gradle | 1 - .../sdks/java-multi-language-pipelines.md | 79 +++++++++++++++---- 4 files changed, 71 insertions(+), 15 deletions(-) rename examples/{multi-language => java}/src/main/java/org/apache/beam/examples/multilanguage/PythonDataframeWordCount.java (100%) diff --git a/examples/multi-language/src/main/java/org/apache/beam/examples/multilanguage/PythonDataframeWordCount.java b/examples/java/src/main/java/org/apache/beam/examples/multilanguage/PythonDataframeWordCount.java similarity index 100% rename from examples/multi-language/src/main/java/org/apache/beam/examples/multilanguage/PythonDataframeWordCount.java rename to examples/java/src/main/java/org/apache/beam/examples/multilanguage/PythonDataframeWordCount.java diff --git a/examples/multi-language/README.md b/examples/multi-language/README.md index 127ab8c30eb2..dea314095b41 100644 --- a/examples/multi-language/README.md +++ b/examples/multi-language/README.md @@ -166,3 +166,9 @@ of the digit. The second item is the predicted label of the digit. ``` gsutil cat gs://$GCP_BUCKET/multi-language-beam/output* ``` + +### Python Dataframe Wordcount + +This example is covered in the [Java multi-language pipelines quickstart](https://beam.apache.org/documentation/sdks/java-multi-language-pipelines/). +The pipeline source code is available at +[PythonDataframeWordCount.java](https://github.com/apache/beam/tree/master/examples/java/src/main/java/org/apache/beam/examples/multilanguage/PythonDataframeWordCount.java). diff --git a/examples/multi-language/build.gradle b/examples/multi-language/build.gradle index 61fdb686f4eb..b266faeb8f17 100644 --- a/examples/multi-language/build.gradle +++ b/examples/multi-language/build.gradle @@ -40,7 +40,6 @@ dependencies { runtimeOnly project(path: ":runners:portability:java") implementation library.java.vendored_guava_26_0_jre implementation project(":sdks:java:expansion-service") - implementation project(":sdks:java:extensions:python") permitUnusedDeclared project(":sdks:java:expansion-service") // BEAM-11761 } diff --git a/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md b/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md index 5f1b971f2046..4b260e57973a 100644 --- a/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md +++ b/website/www/site/content/en/documentation/sdks/java-multi-language-pipelines.md @@ -138,26 +138,27 @@ default Beam SDK, you might need to run your own expansion service. In such cases, [start the expansion service](#advanced-start-an-expansion-service) before running your pipeline. -Here we've provided commands for running the example pipeline using -Gradle on a [Beam HEAD Git clone](https://github.com/apache/beam). -If you need a more stable environment, please -[setup a Java project](/get-started/quickstart-java/) that uses the latest -released Beam version and include the necessary dependencies. 
+### Run with Dataflow runner at HEAD (Beam 2.41.0 and later) -### Run with Dataflow runner +> **Note:** Due to [issue#23717](https://github.com/apache/beam/issues/23717), +> Beam 2.42.0 requires manually starting up an expansion service (see +> [these instructions](https://beam.apache.org/documentation/sdks/java-multi-language-pipelines/#advanced-start-an-expansion-service)) +> and using the additional pipeline option `--expansionService=localhost:` +> when executing the pipeline. The following script runs the example multi-language pipeline on Dataflow, using example text from a Cloud Storage bucket. You’ll need to adapt the script to your environment. ``` +export GCP_PROJECT= export OUTPUT_BUCKET= export GCP_REGION= export TEMP_LOCATION=gs://$OUTPUT_BUCKET/tmp -export PYTHON_VERSION= ./gradlew :examples:multi-language:pythonDataframeWordCount --args=" \ --runner=DataflowRunner \ +--project=$GCP_PROJECT \ --output=gs://${OUTPUT_BUCKET}/count \ --region=${GCP_REGION}" ``` @@ -192,10 +193,15 @@ python -m apache_beam.runners.portability.local_job_service_main -p $JOB_SERVER_ 5. Run the pipeline. +> **Note:** Due to [issue#23717](https://github.com/apache/beam/issues/23717), +> Beam 2.42.0 requires manually starting up an expansion service (see +> [these instructions](https://beam.apache.org/documentation/sdks/java-multi-language-pipelines/#advanced-start-an-expansion-service)) +> and using the additional pipeline option `--expansionService=localhost:` +> when executing the pipeline. + ``` export JOB_SERVER_PORT= # Same port as before export OUTPUT_FILE= -export PYTHON_VERSION= ./gradlew :examples:multi-language:pythonDataframeWordCount --args=" \ --runner=PortableRunner \ @@ -226,19 +232,64 @@ For example, to start the standard expansion service for a Python transform, [ExpansionServiceServicer](https://github.com/apache/beam/blob/master/sdks/python/apache_beam/runners/portability/expansion_service.py), follow these steps: -1. Activate a Python virtual environment and install Apache Beam, as described - in the [Python quick start](/get-started/quickstart-py/). -2. In the **beam/sdks/python** directory of the Beam source code, run the - following command: +1. Activate a new virtual environment following +[these instructions](https://beam.apache.org/get-started/quickstart-py/#create-and-activate-a-virtual-environment). + +2. Install Apache Beam with `gcp` and `dataframe` packages. + +``` +pip install apache-beam[gcp,dataframe] +``` + +4. Run the following command ``` - python apache_beam/runners/portability/expansion_service_main.py -p 18089 --fully_qualified_name_glob "*" + python -m apache_beam.runners.portability.expansion_service_main -p --fully_qualified_name_glob "*" ``` The command runs [expansion_service_main.py](https://github.com/apache/beam/blob/master/sdks/python/apache_beam/runners/portability/expansion_service_main.py), which starts the standard expansion service. When you use Gradle to run your Java pipeline, you can specify the expansion service with the -`expansionService` option. For example: `--expansionService=localhost:18089`. +`expansionService` option. For example: `--expansionService=localhost:`. 
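+
+For example, to point the earlier Dataflow run at a manually started expansion service
+(illustrative; the port must match the service started above):
+
+```
+./gradlew :examples:multi-language:pythonDataframeWordCount --args=" \
+--runner=DataflowRunner \
+--project=$GCP_PROJECT \
+--output=gs://${OUTPUT_BUCKET}/count \
+--region=${GCP_REGION} \
+--expansionService=localhost:<PORT>"
+```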
+ +### Run with Dataflow runner using a Beam release (Beam 2.43.0 and later) + +> **Note:** Due to [issue#23717](https://github.com/apache/beam/issues/23717), +> Beam 2.42.0 requires manually starting up an expansion service (see +> [these instructions](https://beam.apache.org/documentation/sdks/java-multi-language-pipelines/#advanced-start-an-expansion-service)) +> and using the additional pipeline option `--expansionService=localhost:` +> when executing the pipeline. + +* Check out the Beam examples Maven archetype for the relevant Beam version. + +``` +export BEAM_VERSION= + +mvn archetype:generate \ + -DarchetypeGroupId=org.apache.beam \ + -DarchetypeArtifactId=beam-sdks-java-maven-archetypes-examples \ + -DarchetypeVersion=$BEAM_VERSION \ + -DgroupId=org.example \ + -DartifactId=multi-language-beam \ + -Dversion="0.1" \ + -Dpackage=org.apache.beam.examples \ + -DinteractiveMode=false +``` + +* Run the pipeline. + +``` +export GCP_PROJECT= +export GCP_BUCKET= +export GCP_REGION= + +mvn compile exec:java -Dexec.mainClass=org.apache.beam.examples.multilanguage.PythonDataframeWordCount \ + -Dexec.args="--runner=DataflowRunner --project=$GCP_PROJECT \ + --region=us-central1 \ + --gcpTempLocation=gs://$GCP_BUCKET/multi-language-beam/tmp \ + --output=gs://$GCP_BUCKET/multi-language-beam/output" \ + -Pdataflow-runner +``` ## Next steps From ea566b7f4749a4339febe0e57ee2079b9998e8c5 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 21 Oct 2022 06:38:59 +0200 Subject: [PATCH 009/115] Remove obsolete sparkRunner task from hadoop-format: not triggered, no tests selected (addresses #23728) --- sdks/java/io/hadoop-format/build.gradle | 40 ------------------------- 1 file changed, 40 deletions(-) diff --git a/sdks/java/io/hadoop-format/build.gradle b/sdks/java/io/hadoop-format/build.gradle index 702d37175d53..ec70824a5ab8 100644 --- a/sdks/java/io/hadoop-format/build.gradle +++ b/sdks/java/io/hadoop-format/build.gradle @@ -40,14 +40,6 @@ hadoopVersions.each {kv -> configurations.create("hadoopVersion$kv.key")} def elastic_search_version = "7.12.0" -configurations.create("sparkRunner") -configurations.sparkRunner { - // Ban certain dependencies to prevent a StackOverflow within Spark - // because JUL -> SLF4J -> JUL, and similarly JDK14 -> SLF4J -> JDK14 - exclude group: "org.slf4j", module: "jul-to-slf4j" - exclude group: "org.slf4j", module: "slf4j-jdk14" -} - // Ban dependencies from the test runtime classpath configurations.testRuntimeClasspath { // Prevent a StackOverflow because of wiring LOG4J -> SLF4J -> LOG4J @@ -115,15 +107,6 @@ dependencies { testRuntimeOnly library.java.slf4j_jdk14 testRuntimeOnly project(path: ":runners:direct-java", configuration: "shadow") - delegate.add("sparkRunner", project(path: ":sdks:java:io:hadoop-format", configuration: "testRuntimeMigration")) - - sparkRunner project(path: ":examples:java", configuration: "testRuntimeMigration") - sparkRunner project(path: ":examples:java:twitter", configuration: "testRuntimeMigration") - sparkRunner project(":runners:spark:2") - sparkRunner project(":sdks:java:io:hadoop-file-system") - sparkRunner library.java.spark_streaming - sparkRunner library.java.spark_core - hadoopVersions.each {kv -> "hadoopVersion$kv.key" "org.apache.hadoop:hadoop-common:$kv.value" "hadoopVersion$kv.key" "org.apache.hadoop:hadoop-mapreduce-client-core:$kv.value" @@ -169,29 +152,6 @@ task createTargetDirectoryForCassandra() { } test.dependsOn createTargetDirectoryForCassandra -def runnerClass = "org.apache.beam.runners.spark.TestSparkRunner" 
-task sparkRunner(type: Test) { - group = "Verification" - def beamTestPipelineOptions = [ - "--project=hadoop-format", - "--tempRoot=/tmp/hadoop-format/", - "--streaming=false", - "--runner=" + runnerClass, - "--enableSparkMetricSinks=false", - ] - classpath = configurations.sparkRunner - include "**/HadoopFormatIOSequenceFileTest.class" - useJUnit { - includeCategories 'org.apache.beam.sdk.testing.ValidatesRunner' - } - forkEvery 1 - maxParallelForks 4 - systemProperty "spark.ui.enabled", "false" - systemProperty "spark.ui.showConsoleProgress", "false" - systemProperty "beam.spark.test.reuseSparkContext", "true" - systemProperty "beamTestPipelineOptions", JsonOutput.toJson(beamTestPipelineOptions) -} - task hadoopVersionsTest(group: "Verification") { description = "Runs Hadoop format tests with different Hadoop versions" dependsOn createTaskNames(hadoopVersions, "Test") From 231f07d4af4f0344a43f151250856d8632d39867 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 21 Oct 2022 08:23:07 +0200 Subject: [PATCH 010/115] Remove Spark2 from Java testing projects (addresses #23728) (#23749) --- sdks/java/testing/load-tests/build.gradle | 15 +------- sdks/java/testing/nexmark/build.gradle | 47 +++++++++-------------- sdks/java/testing/tpcds/README.md | 4 +- sdks/java/testing/tpcds/build.gradle | 2 +- 4 files changed, 23 insertions(+), 45 deletions(-) diff --git a/sdks/java/testing/load-tests/build.gradle b/sdks/java/testing/load-tests/build.gradle index 2d93993a5657..e157f2fabf32 100644 --- a/sdks/java/testing/load-tests/build.gradle +++ b/sdks/java/testing/load-tests/build.gradle @@ -39,7 +39,7 @@ def runnerDependency = (project.hasProperty(runnerProperty) : ":runners:direct-java") def loadTestRunnerVersionProperty = "runner.version" def loadTestRunnerVersion = project.findProperty(loadTestRunnerVersionProperty) -def shouldProvideSpark = ":runners:spark:2".equals(runnerDependency) +def isSparkRunner = runnerDependency.startsWith(":runners:spark:") def isDataflowRunner = ":runners:google-cloud-dataflow-java".equals(runnerDependency) def isDataflowRunnerV2 = isDataflowRunner && "V2".equals(loadTestRunnerVersion) def runnerConfiguration = ":runners:direct-java".equals(runnerDependency) ? "shadow" : null @@ -82,20 +82,9 @@ dependencies { gradleRun project(project.path) gradleRun project(path: runnerDependency, configuration: runnerConfiguration) - - // The Spark runner requires the user to provide a Spark dependency. For self-contained - // runs with the Spark runner, we can provide such a dependency. 
This is deliberately phrased - // to not hardcode any runner other than :runners:direct-java - if (shouldProvideSpark) { - gradleRun library.java.spark_streaming - gradleRun library.java.spark_core, { - exclude group:"org.slf4j", module:"jul-to-slf4j" - } - gradleRun library.java.spark_sql - } } -if (shouldProvideSpark) { +if (isSparkRunner) { configurations.gradleRun { // Using Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath exclude group: "org.slf4j", module: "slf4j-jdk14" diff --git a/sdks/java/testing/nexmark/build.gradle b/sdks/java/testing/nexmark/build.gradle index 3a8d3440c80b..a7fbf2e08ad4 100644 --- a/sdks/java/testing/nexmark/build.gradle +++ b/sdks/java/testing/nexmark/build.gradle @@ -38,8 +38,7 @@ def nexmarkRunnerDependency = project.findProperty(nexmarkRunnerProperty) ?: ":runners:direct-java" def nexmarkRunnerVersionProperty = "nexmark.runner.version" def nexmarkRunnerVersion = project.findProperty(nexmarkRunnerVersionProperty) -def shouldProvideSpark2 = ":runners:spark:2".equals(nexmarkRunnerDependency) -def shouldProvideSpark3 = ":runners:spark:3".equals(nexmarkRunnerDependency) +def isSparkRunner = nexmarkRunnerDependency.startsWith(":runners:spark:") def isDataflowRunner = ":runners:google-cloud-dataflow-java".equals(nexmarkRunnerDependency) def isDataflowRunnerV2 = isDataflowRunner && "V2".equals(nexmarkRunnerVersion) def runnerConfiguration = ":runners:direct-java".equals(nexmarkRunnerDependency) ? "shadow" : null @@ -91,39 +90,15 @@ dependencies { testImplementation project(path: ":sdks:java:testing:test-utils", configuration: "testRuntimeMigration") gradleRun project(project.path) gradleRun project(path: nexmarkRunnerDependency, configuration: runnerConfiguration) - - // The Spark runner requires the user to provide a Spark dependency. For self-contained - // runs with the Spark runner, we can provide such a dependency. 
This is deliberately phrased - // to not hardcode any runner other than :runners:direct-java - if (shouldProvideSpark2) { - gradleRun library.java.spark_core, { - exclude group:"org.slf4j", module:"jul-to-slf4j" - } - gradleRun library.java.spark_sql - gradleRun library.java.spark_streaming - } - if (shouldProvideSpark3) { - gradleRun library.java.spark3_core, { - exclude group:"org.slf4j", module:"jul-to-slf4j" - } - - gradleRun library.java.spark3_sql - gradleRun library.java.spark3_streaming - } } -if (shouldProvideSpark2) { - configurations.gradleRun { - // Using Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath - exclude group: "org.slf4j", module: "slf4j-jdk14" - } -} -if (shouldProvideSpark3) { +if (isSparkRunner) { configurations.gradleRun { // Using Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath exclude group: "org.slf4j", module: "slf4j-jdk14" } } + def getNexmarkArgs = { def nexmarkArgsStr = project.findProperty(nexmarkArgsProperty) ?: "" def nexmarkArgsList = new ArrayList() @@ -155,6 +130,12 @@ def getNexmarkArgs = { } } } + + if(isSparkRunner) { + // For transparency, be explicit about configuration of local Spark + nexmarkArgsList.add("--sparkMaster=local[4]") + } + return nexmarkArgsList } @@ -162,7 +143,7 @@ def getNexmarkArgs = { // // Parameters: // -Pnexmark.runner -// Specify a runner subproject, such as ":runners:spark:2" or ":runners:flink:1.13" +// Specify a runner subproject, such as ":runners:spark:3" or ":runners:flink:1.13" // Defaults to ":runners:direct-java" // // -Pnexmark.args @@ -177,6 +158,14 @@ task run(type: JavaExec) { dependsOn ":runners:google-cloud-dataflow-java:worker:legacy-worker:shadowJar" } } + if(isSparkRunner) { + // Disable UI + systemProperty "spark.ui.enabled", "false" + systemProperty "spark.ui.showConsoleProgress", "false" + // Dataset runner only + systemProperty "spark.sql.shuffle.partitions", "4" + } + mainClass = "org.apache.beam.sdk.nexmark.Main" classpath = configurations.gradleRun args nexmarkArgsList.toArray() diff --git a/sdks/java/testing/tpcds/README.md b/sdks/java/testing/tpcds/README.md index 247b5cbe9300..85826e341ffb 100644 --- a/sdks/java/testing/tpcds/README.md +++ b/sdks/java/testing/tpcds/README.md @@ -55,10 +55,10 @@ To run a query using ZetaSQL planner (currently Query96 can be run using ZetaSQL ## Spark Runner -To execute TPC-DS benchmark with Query3 for 1Gb dataset on Apache Spark 2.x, run the following example command from the command line: +To execute TPC-DS benchmark with Query3 for 1Gb dataset on Apache Spark 3.x, run the following example command from the command line: ```bash -./gradlew :sdks:java:testing:tpcds:run -Ptpcds.runner=":runners:spark:2" -Ptpcds.args=" \ +./gradlew :sdks:java:testing:tpcds:run -Ptpcds.runner=":runners:spark:3" -Ptpcds.args=" \ --runner=SparkRunner \ --queries=3 \ --tpcParallel=1 \ diff --git a/sdks/java/testing/tpcds/build.gradle b/sdks/java/testing/tpcds/build.gradle index e9537cfe50ca..325222e8e8f1 100644 --- a/sdks/java/testing/tpcds/build.gradle +++ b/sdks/java/testing/tpcds/build.gradle @@ -94,7 +94,7 @@ if (isSpark) { // // Parameters: // -Ptpcds.runner -// Specify a runner subproject, such as ":runners:spark:2" or ":runners:flink:1.13" +// Specify a runner subproject, such as ":runners:spark:3" or ":runners:flink:1.13" // Defaults to ":runners:direct-java" // // -Ptpcds.args From cba999a2deb7908f1733fa72b42e4a60047d3028 Mon Sep 17 00:00:00 2001 From: Philippe Moussalli Date: Fri, 21 Oct 2022 16:06:29 +0200 Subject: 
[PATCH 011/115] bugfix/wrong-notebook-linl (#23777) --- .../site/content/en/documentation/ml/multi-model-pipelines.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/www/site/content/en/documentation/ml/multi-model-pipelines.md b/website/www/site/content/en/documentation/ml/multi-model-pipelines.md index be614e4b5000..ad1f5ff80f46 100644 --- a/website/www/site/content/en/documentation/ml/multi-model-pipelines.md +++ b/website/www/site/content/en/documentation/ml/multi-model-pipelines.md @@ -90,7 +90,7 @@ with pipeline as p: ``` In -this [notebook](https://github.com/apache/beam/tree/master/examples/notebooks/beam-ml/run-inference-multi-model.ipynb) +this [notebook](https://github.com/apache/beam/tree/master/examples/notebooks/beam-ml/run_inference_multi_model.ipynb) , we show an end-to-end example of a cascade pipeline used for generating and ranking image captions. The solution consists of two open-source models: From e4aa86fbbfe9a7d6597d863fc5f4e32a6de370ba Mon Sep 17 00:00:00 2001 From: Vitaly Terentyev Date: Fri, 21 Oct 2022 18:30:41 +0400 Subject: [PATCH 012/115] [CdapIO] Integration CdapIO with SparkReceiverIO (#22584) * [BEAM-14378] Add SparkReceiverIO * Fix comments * Resolve comments * Fix checkstyle * Implementation of storeFn function was extended * Null-check was added * Integration CdapIO with SparkReceiverIO * Refactoring * Resolve comments * Fix tests Co-authored-by: Sorokin Andrey --- sdks/java/io/cdap/build.gradle | 3 + .../org/apache/beam/sdk/io/cdap/CdapIO.java | 260 ++++++++++++++++-- .../beam/sdk/io/cdap/ConfigWrapper.java | 4 + .../apache/beam/sdk/io/cdap/MappingUtils.java | 112 +++++++- .../org/apache/beam/sdk/io/cdap/Plugin.java | 84 ++++-- .../org/apache/beam/sdk/io/cdap/CdapIOIT.java | 6 +- .../apache/beam/sdk/io/cdap/CdapIOTest.java | 68 ++++- .../beam/sdk/io/cdap/EmployeeConfig.java | 5 +- .../apache/beam/sdk/io/cdap/PluginTest.java | 6 +- .../cdap/{ => batch}/EmployeeBatchSink.java | 4 +- .../cdap/{ => batch}/EmployeeBatchSource.java | 6 +- .../cdap/{ => batch}/EmployeeInputFormat.java | 3 +- .../EmployeeInputFormatProvider.java | 4 +- .../{ => batch}/EmployeeOutputFormat.java | 9 +- .../EmployeeOutputFormatProvider.java | 4 +- .../io/cdap/streaming/EmployeeReceiver.java | 91 ++++++ .../streaming/EmployeeStreamingSource.java | 76 +++++ sdks/java/io/sparkreceiver/build.gradle | 1 - .../ReadFromSparkReceiverWithOffsetDoFn.java | 52 +++- .../ArrayBufferDataReceiver.java | 85 ++++++ .../sparkreceiver/ByteBufferDataReceiver.java | 84 ++++++ .../sparkreceiver/IteratorDataReceiver.java | 87 ++++++ .../io/sparkreceiver/SparkReceiverIOTest.java | 89 ++++-- 23 files changed, 1025 insertions(+), 118 deletions(-) rename sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/{ => batch}/EmployeeBatchSink.java (95%) rename sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/{ => batch}/EmployeeBatchSource.java (94%) rename sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/{ => batch}/EmployeeInputFormat.java (97%) rename sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/{ => batch}/EmployeeInputFormatProvider.java (93%) rename sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/{ => batch}/EmployeeOutputFormat.java (88%) rename sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/{ => batch}/EmployeeOutputFormatProvider.java (93%) create mode 100644 sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeReceiver.java create mode 100644 
sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeStreamingSource.java create mode 100644 sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ArrayBufferDataReceiver.java create mode 100644 sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ByteBufferDataReceiver.java create mode 100644 sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/IteratorDataReceiver.java diff --git a/sdks/java/io/cdap/build.gradle b/sdks/java/io/cdap/build.gradle index 1bcc0ece146b..3cfc01f79f7a 100644 --- a/sdks/java/io/cdap/build.gradle +++ b/sdks/java/io/cdap/build.gradle @@ -52,14 +52,17 @@ dependencies { implementation library.java.cdap_plugin_zendesk implementation library.java.commons_lang3 implementation library.java.guava + implementation library.java.google_code_gson implementation library.java.hadoop_common implementation library.java.hadoop_mapreduce_client_core implementation library.java.jackson_core implementation library.java.jackson_databind implementation library.java.slf4j_api + implementation library.java.spark_streaming implementation library.java.tephra implementation library.java.vendored_guava_26_0_jre implementation project(path: ":sdks:java:core", configuration: "shadow") + implementation project(":sdks:java:io:sparkreceiver") implementation project(":sdks:java:io:hadoop-format") testImplementation library.java.cdap_plugin_service_now testImplementation library.java.cdap_etl_api diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java index f2655507cf56..5590bb061654 100644 --- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java +++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/CdapIO.java @@ -17,27 +17,163 @@ */ package org.apache.beam.sdk.io.cdap; -import static org.apache.beam.sdk.util.Preconditions.checkArgumentNotNull; +import static org.apache.beam.sdk.io.cdap.MappingUtils.getOffsetFnForPluginClass; +import static org.apache.beam.sdk.io.cdap.MappingUtils.getPluginByClass; +import static org.apache.beam.sdk.io.cdap.MappingUtils.getReceiverBuilderByPluginClass; +import static org.apache.beam.sdk.util.Preconditions.checkStateNotNull; import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument; import com.google.auto.value.AutoValue; import io.cdap.cdap.api.plugin.PluginConfig; +import java.util.Map; import org.apache.beam.sdk.annotations.Experimental; import org.apache.beam.sdk.annotations.Experimental.Kind; +import org.apache.beam.sdk.coders.CannotProvideCoderException; +import org.apache.beam.sdk.coders.Coder; import org.apache.beam.sdk.io.hadoop.format.HDFSSynchronization; import org.apache.beam.sdk.io.hadoop.format.HadoopFormatIO; +import org.apache.beam.sdk.io.sparkreceiver.SparkReceiverIO; +import org.apache.beam.sdk.transforms.MapElements; import org.apache.beam.sdk.transforms.PTransform; +import org.apache.beam.sdk.transforms.SerializableFunction; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PBegin; import org.apache.beam.sdk.values.PCollection; import org.apache.beam.sdk.values.PDone; +import org.apache.beam.sdk.values.TypeDescriptor; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.OutputFormat; import 
org.checkerframework.checker.nullness.qual.Nullable;

 /**
- * An unbounded/bounded sources and sinks from CDAP plugins.
+ * A {@link CdapIO} is a Transform for reading data from a source or writing data to a sink of a
+ * Cdap Plugin. It uses {@link HadoopFormatIO} for batch and SparkReceiverIO for streaming.
+ *
+ * <h3>Read from Cdap Plugin Bounded Source</h3>
+ *
+ * <p>To configure a {@link CdapIO} source, you must specify a Cdap {@link Plugin}, a Cdap {@link
+ * PluginConfig}, and the key and value classes.
+ *
+ * <p>{@link Plugin} is the wrapper class for a Cdap Plugin. It contains the main information
+ * about the Plugin. An object of the {@link Plugin} class can be created with the {@link
+ * Plugin#createBatch(Class, Class, Class)} method, which requires the following parameters:
+ *
+ * <ul>
+ *   <li>a {@link io.cdap.cdap.etl.api.batch.BatchSource} class
+ *   <li>an {@link InputFormat} class
+ *   <li>an {@link io.cdap.cdap.api.data.batch.InputFormatProvider} class
+ * </ul>
+ *
+ * <p>For more information about the InputFormat and InputFormatProvider, see {@link
+ * HadoopFormatIO}.
+ *
+ * <p>Every Cdap Plugin has its own {@link PluginConfig} class with the fields necessary to
+ * configure the Plugin. You can set the {@link Map} of your parameters with the {@link
+ * ConfigWrapper#withParams(Map)} method, where the key is the field name.
+ *
+ * <p>For example, to create a basic {@link CdapIO#read()} transform:
+ *
+ * <pre>{@code
+ * Pipeline p = ...; // Create pipeline.
+ *
+ * // Create PluginConfig for specific plugin
+ * EmployeeConfig pluginConfig =
+ *         new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build();
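+ * // TEST_EMPLOYEE_PARAMS_MAP stands for an arbitrary Map<String, Object> of plugin parameters
+ * // keyed by PluginConfig field name, e.g. "objectType" -> "employee" (illustrative values).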
+ *
+ * // Read using CDAP batch plugin
+ * p.apply("ReadBatch",
+ * CdapIO.<String, String>read()
+ *             .withCdapPlugin(
+ *                 Plugin.createBatch(
+ *                     EmployeeBatchSource.class,
+ *                     EmployeeInputFormat.class,
+ *                     EmployeeInputFormatProvider.class))
+ *             .withPluginConfig(pluginConfig)
+ *             .withKeyClass(String.class)
+ *             .withValueClass(String.class));
+ * }</pre>
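+ *
+ * <p>The read produces a {@code PCollection<KV<String, String>>}. A possible way to inspect it
+ * (a sketch, not part of the CdapIO API; {@code readResult} stands for the output of the read
+ * above) is to format each pair as text:
+ *
+ * <pre>{@code
+ * readResult.apply(
+ *     MapElements.into(TypeDescriptors.strings())
+ *         .via(kv -> kv.getKey() + ": " + kv.getValue()));
+ * }</pre>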
+ *
+ * <h3>Write to Cdap Plugin Bounded Sink</h3>
+ *
+ * <p>To configure a {@link CdapIO} sink, just as for {@link CdapIO#read()}, a Cdap {@link
+ * Plugin}, a Cdap {@link PluginConfig}, and the key and value classes must be specified. In
+ * addition, it's necessary to set the locks directory path with {@link
+ * CdapIO.Write#withLocksDirPath(String)}. It's used for the {@link HDFSSynchronization}
+ * configuration of {@link HadoopFormatIO}. More info can be found in the {@link HadoopFormatIO}
+ * documentation.
+ *
+ * <p>To create an object of the {@link Plugin} class with the {@link Plugin#createBatch(Class,
+ * Class, Class)} method, you need to specify the following parameters:
+ *
+ * <ul>
+ *   <li>a {@link io.cdap.cdap.etl.api.batch.BatchSink} class
+ *   <li>an {@link OutputFormat} class
+ *   <li>an {@link io.cdap.cdap.api.data.batch.OutputFormatProvider} class
+ * </ul>
+ *
+ * <p>For more information about the OutputFormat and OutputFormatProvider, see {@link
+ * HadoopFormatIO}.
+ *
+ * <p>Example of {@link CdapIO#write()} usage:
+ *
+ * <pre>{@code
+ * Pipeline p = ...; // Create pipeline.
+ *
+ * // Get or create data to write
+ * PCollection<KV<String, String>> input = p.apply(Create.of(data));
+ *
+ * // Create PluginConfig for specific plugin
+ * EmployeeConfig pluginConfig =
+ *         new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build();
+ *
+ * // Write using CDAP batch plugin
+ * input.apply(
+ *         "WriteBatch",
+ *         CdapIO.<String, String>write()
+ *             .withCdapPlugin(
+ *                 Plugin.createBatch(
+ *                     EmployeeBatchSink.class,
+ *                     EmployeeOutputFormat.class,
+ *                     EmployeeOutputFormatProvider.class))
+ *             .withPluginConfig(pluginConfig)
+ *             .withKeyClass(String.class)
+ *             .withValueClass(String.class)
+ *             .withLocksDirPath(tmpFolder.getRoot().getAbsolutePath()));
+ *     p.run();
+ * }</pre>
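+ *
+ * <p>Here {@code data} stands for any in-memory collection of key/value pairs; one illustrative
+ * way to build it (the values are placeholders, not part of the connector) is:
+ *
+ * <pre>{@code
+ * List<KV<String, String>> data =
+ *     Arrays.asList(KV.of("1", "employee 1"), KV.of("2", "employee 2"));
+ * }</pre>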
+ *
+ * <h3>Read from Cdap Plugin Streaming Source</h3>
+ *
+ * <p>To configure a {@link CdapIO} source, you must specify a Cdap {@link Plugin}, a Cdap {@link
+ * PluginConfig}, and the key and value classes.
+ *
+ * <p>{@link Plugin} is the wrapper class for a Cdap Plugin. It contains the main information
+ * about the Plugin. An object of the {@link Plugin} class can be created with the {@link
+ * Plugin#createStreaming(Class)} method, which requires a {@link
+ * io.cdap.cdap.etl.api.streaming.StreamingSource} class parameter.
+ *
+ * <p>Every Cdap Plugin has its own {@link PluginConfig} class with the fields necessary to
+ * configure the Plugin. You can set the {@link Map} of your parameters with the {@link
+ * ConfigWrapper#withParams(Map)} method, where the key is the field name.
+ *
+ * <p>For example, to create a basic {@link CdapIO#read()} transform:
+ *
+ * <pre>{@code
+ * Pipeline p = ...; // Create pipeline.
+ *
+ * // Create PluginConfig for specific plugin
+ * EmployeeConfig pluginConfig =
+ *         new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build();
+ *
+ * // Read using CDAP streaming plugin
+ * p.apply("ReadStreaming",
+ * CdapIO.<String, String>read()
+ *             .withCdapPlugin(Plugin.createStreaming(EmployeeStreamingSource.class))
+ *             .withPluginConfig(pluginConfig)
+ *             .withKeyClass(String.class)
+ *             .withValueClass(String.class));
+ * }</pre>
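+ *
+ * <p>For a streaming source, the keys of the resulting {@code KV}s are always null and only the
+ * values carry data, so a possible follow-up step (a sketch; {@code readResult} stands for the
+ * output of the read above) is to keep just the values:
+ *
+ * <pre>{@code
+ * readResult.apply(Values.<String>create());
+ * }</pre>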
 */
@Experimental(Kind.SOURCE_SINK)
public class CdapIO {
@@ -54,12 +190,25 @@ public static <K, V> Write<K, V> write() {
   @AutoValue
   @AutoValue.CopyAnnotations
   public abstract static class Read<K, V> extends PTransform<PBegin, PCollection<KV<K, V>>> {
+
     abstract @Nullable PluginConfig getPluginConfig();

     abstract @Nullable Plugin getCdapPlugin();

+    /**
+     * Depending on the selected {@link HadoopFormatIO} type ({@link InputFormat} or {@link
+     * OutputFormat}), the appropriate key class ("key.class") in the Hadoop {@link Configuration}
+     * must be provided. If you set a different key class than the Format's actual key class, it
+     * may result in an error. More info can be found in the {@link HadoopFormatIO} documentation.
+     */
     abstract @Nullable Class<K> getKeyClass();

+    /**
+     * Depending on the selected {@link HadoopFormatIO} type ({@link InputFormat} or {@link
+     * OutputFormat}), the appropriate value class ("value.class") in the Hadoop {@link
+     * Configuration} must be provided. If you set a different value class than the Format's
+     * actual value class, it may result in an error. More info can be found in the {@link
+     * HadoopFormatIO} documentation.
+     */
     abstract @Nullable Class<V> getValueClass();

     abstract Builder<K, V> toBuilder();
@@ -79,27 +228,32 @@ abstract static class Builder<K, V> {
       abstract Read<K, V> build();
     }

+    /** Sets a CDAP {@link Plugin}. */
     public Read<K, V> withCdapPlugin(Plugin plugin) {
       checkArgument(plugin != null, "Cdap plugin can not be null");
       return toBuilder().setCdapPlugin(plugin).build();
     }

+    /** Sets a CDAP Plugin class. */
     public Read<K, V> withCdapPluginClass(Class<?> cdapPluginClass) {
       checkArgument(cdapPluginClass != null, "Cdap plugin class can not be null");
       Plugin plugin = MappingUtils.getPluginByClass(cdapPluginClass);
       return toBuilder().setCdapPlugin(plugin).build();
     }

+    /** Sets a {@link PluginConfig}. */
     public Read<K, V> withPluginConfig(PluginConfig pluginConfig) {
       checkArgument(pluginConfig != null, "Plugin config can not be null");
       return toBuilder().setPluginConfig(pluginConfig).build();
     }

+    /** Sets a key class. */
     public Read<K, V> withKeyClass(Class<K> keyClass) {
       checkArgument(keyClass != null, "Key class can not be null");
       return toBuilder().setKeyClass(keyClass).build();
     }

+    /** Sets a value class. */
     public Read<K, V> withValueClass(Class<V> valueClass) {
       checkArgument(valueClass != null, "Value class can not be null");
       return toBuilder().setValueClass(valueClass).build();
     }
@@ -107,19 +261,38 @@ public Read<K, V> withValueClass(Class<V> valueClass) {

     @Override
     public PCollection<KV<K, V>> expand(PBegin input) {
-      Plugin plugin = checkArgumentNotNull(getCdapPlugin(), "withCdapPluginClass() is required");
-      PluginConfig pluginConfig =
-          checkArgumentNotNull(getPluginConfig(), "withPluginConfig() is required");
-      Class<K> keyClass = checkArgumentNotNull(getKeyClass(), "withKeyClass() is required");
-      Class<V> valueClass = checkArgumentNotNull(getValueClass(), "withValueClass() is required");
-
-      plugin.withConfig(pluginConfig).withHadoopConfiguration(keyClass, valueClass).prepareRun();
-
-      if (plugin.isUnbounded()) {
-        // TODO: implement SparkReceiverIO.<~>read()
-        throw new NotImplementedException("Support for unbounded plugins is not implemented!");
+      Plugin cdapPlugin = getCdapPlugin();
+      checkStateNotNull(cdapPlugin, "withCdapPluginClass() is required");
+
+      PluginConfig pluginConfig = getPluginConfig();
+      checkStateNotNull(pluginConfig, "withPluginConfig() is required");
+
+      Class<V> valueClass = getValueClass();
+      checkStateNotNull(valueClass, "withValueClass() is required");
+
+      Class<K> keyClass = getKeyClass();
+      checkStateNotNull(keyClass, "withKeyClass() is required");
+
+      cdapPlugin.withConfig(pluginConfig);
+
+      if (cdapPlugin.isUnbounded()) {
+        SparkReceiverIO.Read<V> reader =
+            SparkReceiverIO.<V>read()
+                .withGetOffsetFn(getOffsetFnForPluginClass(cdapPlugin.getPluginClass(), valueClass))
+                .withSparkReceiverBuilder(
+                    getReceiverBuilderByPluginClass(
+                        cdapPlugin.getPluginClass(), pluginConfig, valueClass));
+        try {
+          Coder<V> coder = input.getPipeline().getCoderRegistry().getCoder(valueClass);
+          PCollection<V> values = input.apply(reader).setCoder(coder);
+          SerializableFunction<V, KV<K, V>> fn = input1 -> KV.of(null, input1);
+          return values.apply(MapElements.into(new TypeDescriptor<KV<K, V>>() {}).via(fn));
+        } catch (CannotProvideCoderException e) {
+          throw new IllegalStateException("Could not get value Coder", e);
+        }
       } else {
-        Configuration hConf = plugin.getHadoopConfiguration();
+        cdapPlugin.withHadoopConfiguration(keyClass, valueClass).prepareRun();
+        Configuration hConf = cdapPlugin.getHadoopConfiguration();
         HadoopFormatIO.Read<K, V> readFromHadoop =
             HadoopFormatIO.<K, V>read().withConfiguration(hConf);
         return input.apply(readFromHadoop);
@@ -127,7 +300,7 @@ public PCollection<KV<K, V>> expand(PBegin input) {
       }
     }

-  /** A {@link PTransform} to read from CDAP source. */
+  /** A {@link PTransform} to write to CDAP sink. */
   @AutoValue
   @AutoValue.CopyAnnotations
   public abstract static class Write<K, V> extends PTransform<PCollection<KV<K, V>>, PDone> {

     abstract @Nullable PluginConfig getPluginConfig();

     abstract @Nullable Plugin getCdapPlugin();

+    /**
+     * Depending on the selected {@link HadoopFormatIO} type ({@link InputFormat} or {@link
+     * OutputFormat}), the appropriate key class ("key.class") in the Hadoop {@link Configuration}
+     * must be provided. If you set a different key class than the Format's actual key class, it
+     * may result in an error. More info can be found in the {@link HadoopFormatIO} documentation.
+     */
     abstract @Nullable Class<K> getKeyClass();

+    /**
+     * Depending on the selected {@link HadoopFormatIO} type ({@link InputFormat} or {@link
+     * OutputFormat}), the appropriate value class ("value.class") in the Hadoop {@link
+     * Configuration} must be provided. If you set a different value class than the Format's
+     * actual value class, it may result in an error. More info can be found in the {@link
+     * HadoopFormatIO} documentation.
+     */
     abstract @Nullable Class<V> getValueClass();

+    /**
+     * Directory where locks will be stored. This directory MUST be different from the directory
+     * that is possibly stored under the FileOutputFormat.outputDir key. Used for the {@link
+     * HDFSSynchronization} configuration of {@link HadoopFormatIO}. More info can be found in the
+     * {@link HadoopFormatIO} documentation.
+     */
     abstract @Nullable String getLocksDirPath();

     abstract Builder<K, V> toBuilder();
@@ -161,32 +352,38 @@ abstract static class Builder<K, V> {
       abstract Write<K, V> build();
     }

+    /** Sets a CDAP {@link Plugin}. */
     public Write<K, V> withCdapPlugin(Plugin plugin) {
       checkArgument(plugin != null, "Cdap plugin can not be null");
       return toBuilder().setCdapPlugin(plugin).build();
     }

+    /** Sets a CDAP Plugin class. */
     public Write<K, V> withCdapPluginClass(Class<?> cdapPluginClass) {
       checkArgument(cdapPluginClass != null, "Cdap plugin class can not be null");
-      Plugin plugin = MappingUtils.getPluginByClass(cdapPluginClass);
+      Plugin plugin = getPluginByClass(cdapPluginClass);
       return toBuilder().setCdapPlugin(plugin).build();
     }

+    /** Sets a {@link PluginConfig}. */
     public Write<K, V> withPluginConfig(PluginConfig pluginConfig) {
       checkArgument(pluginConfig != null, "Plugin config can not be null");
       return toBuilder().setPluginConfig(pluginConfig).build();
     }

+    /** Sets a key class. */
     public Write<K, V> withKeyClass(Class<K> keyClass) {
       checkArgument(keyClass != null, "Key class can not be null");
       return toBuilder().setKeyClass(keyClass).build();
     }

+    /** Sets path to the directory where locks will be stored. */
     public Write<K, V> withLocksDirPath(String locksDirPath) {
       checkArgument(locksDirPath != null, "Locks dir path can not be null");
       return toBuilder().setLocksDirPath(locksDirPath).build();
     }

+    /** Sets a value class. 
*/ public Write withValueClass(Class valueClass) { checkArgument(valueClass != null, "Value class can not be null"); return toBuilder().setValueClass(valueClass).build(); @@ -194,21 +391,30 @@ public Write withValueClass(Class valueClass) { @Override public PDone expand(PCollection> input) { - Plugin plugin = checkArgumentNotNull(getCdapPlugin(), "withKeyClass() is required"); - PluginConfig pluginConfig = - checkArgumentNotNull(getPluginConfig(), "withKeyClass() is required"); - Class keyClass = checkArgumentNotNull(getKeyClass(), "withKeyClass() is required"); - Class valueClass = checkArgumentNotNull(getValueClass(), "withValueClass() is required"); - String locksDirPath = - checkArgumentNotNull(getLocksDirPath(), "withLocksDirPath() is required"); + Plugin cdapPlugin = getCdapPlugin(); + checkStateNotNull(cdapPlugin, "withCdapPluginClass() is required"); + + PluginConfig pluginConfig = getPluginConfig(); + checkStateNotNull(pluginConfig, "withPluginConfig() is required"); + + Class keyClass = getKeyClass(); + checkStateNotNull(keyClass, "withKeyClass() is required"); + Class valueClass = getValueClass(); + checkStateNotNull(valueClass, "withValueClass() is required"); + + String locksDirPath = getLocksDirPath(); + checkStateNotNull(locksDirPath, "withLocksDirPath() is required"); - plugin.withConfig(pluginConfig).withHadoopConfiguration(keyClass, valueClass).prepareRun(); + cdapPlugin + .withConfig(pluginConfig) + .withHadoopConfiguration(keyClass, valueClass) + .prepareRun(); - if (plugin.isUnbounded()) { + if (cdapPlugin.isUnbounded()) { // TODO: implement SparkReceiverIO.<~>write() throw new NotImplementedException("Support for unbounded plugins is not implemented!"); } else { - Configuration hConf = plugin.getHadoopConfiguration(); + Configuration hConf = cdapPlugin.getHadoopConfiguration(); HadoopFormatIO.Write writeHadoop = HadoopFormatIO.write() .withConfiguration(hConf) diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/ConfigWrapper.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/ConfigWrapper.java index 9a2124e21b46..b073e275be38 100644 --- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/ConfigWrapper.java +++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/ConfigWrapper.java @@ -41,6 +41,7 @@ public ConfigWrapper(Class configClass) { this.configClass = configClass; } + /** Gets {@link ConfigWrapper} by JSON string. */ public ConfigWrapper fromJsonString(String jsonString) throws IOException { TypeReference> typeRef = new TypeReference>() {}; @@ -53,6 +54,7 @@ public ConfigWrapper fromJsonString(String jsonString) throws IOException { return this; } + /** Gets {@link ConfigWrapper} by JSON file. */ public ConfigWrapper fromJsonFile(File jsonFile) throws IOException { TypeReference> typeRef = new TypeReference>() {}; @@ -65,11 +67,13 @@ public ConfigWrapper fromJsonFile(File jsonFile) throws IOException { return this; } + /** Sets a {@link Plugin} parameters {@link Map}. */ public ConfigWrapper withParams(Map paramsMap) { this.paramsMap = new HashMap<>(paramsMap); return this; } + /** Sets a {@link Plugin} single parameter. 
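+ * For illustration, a config could be assembled as follows (a sketch only; {@code "objectType"}
+ * is a field of the Employee test config, but the value shown here is hypothetical):
+ *
+ * <pre>{@code
+ * EmployeeConfig config =
+ *     new ConfigWrapper<>(EmployeeConfig.class)
+ *         .withParams(paramsMap)
+ *         .setParam("objectType", "employee") // hypothetical value
+ *         .build();
+ * }</pre>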
*/ public ConfigWrapper setParam(String paramName, Object param) { getParamsMap().put(paramName, param); return this; diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/MappingUtils.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/MappingUtils.java index f8c7ce5d7550..463cc501a982 100644 --- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/MappingUtils.java +++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/MappingUtils.java @@ -17,14 +17,19 @@ */ package org.apache.beam.sdk.io.cdap; +import static org.apache.beam.sdk.util.Preconditions.checkArgumentNotNull; import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument; +import com.google.gson.Gson; +import io.cdap.cdap.api.plugin.PluginConfig; import io.cdap.plugin.common.SourceInputFormatProvider; import io.cdap.plugin.hubspot.sink.batch.HubspotBatchSink; import io.cdap.plugin.hubspot.sink.batch.HubspotOutputFormat; import io.cdap.plugin.hubspot.source.batch.HubspotBatchSource; import io.cdap.plugin.hubspot.source.batch.HubspotInputFormat; import io.cdap.plugin.hubspot.source.batch.HubspotInputFormatProvider; +import io.cdap.plugin.hubspot.source.streaming.HubspotReceiver; +import io.cdap.plugin.hubspot.source.streaming.HubspotStreamingSource; import io.cdap.plugin.salesforce.plugin.source.batch.SalesforceBatchSource; import io.cdap.plugin.salesforce.plugin.source.batch.SalesforceInputFormat; import io.cdap.plugin.salesforce.plugin.source.batch.SalesforceInputFormatProvider; @@ -33,23 +38,118 @@ import io.cdap.plugin.zendesk.source.batch.ZendeskBatchSource; import io.cdap.plugin.zendesk.source.batch.ZendeskInputFormat; import io.cdap.plugin.zendesk.source.batch.ZendeskInputFormatProvider; +import java.util.HashMap; +import java.util.Map; +import org.apache.beam.sdk.io.sparkreceiver.ReceiverBuilder; +import org.apache.beam.sdk.transforms.SerializableFunction; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.reflect.TypeToken; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.spark.streaming.receiver.Receiver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +/** Util class for mapping plugins. */ public class MappingUtils { - public static Plugin getPluginByClass(Class pluginClass) { + private static final Logger LOG = LoggerFactory.getLogger(MappingUtils.class); + private static final String HUBSPOT_ID_FIELD = "vid"; + private static final Gson GSON = new Gson(); + + private static final Map< + Class, Pair, ReceiverBuilder>>> + REGISTERED_PLUGINS; + + static { + REGISTERED_PLUGINS = new HashMap<>(); + } + + /** Gets a {@link Plugin} by its class. 
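+ * Only a fixed set of Salesforce, Hubspot, Zendesk and ServiceNow plugin classes is mapped
+ * here; any other class results in an {@link UnsupportedOperationException}. Note that {@link
+ * #registerStreamingPlugin} registers only the receiver builder and offset function used by
+ * the streaming read path, not this class-to-{@link Plugin} mapping.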
*/ + static Plugin getPluginByClass(Class pluginClass) { checkArgument(pluginClass != null, "Plugin class can not be null!"); if (pluginClass.equals(SalesforceBatchSource.class)) { - return Plugin.create( + return Plugin.createBatch( pluginClass, SalesforceInputFormat.class, SalesforceInputFormatProvider.class); } else if (pluginClass.equals(HubspotBatchSource.class)) { - return Plugin.create(pluginClass, HubspotInputFormat.class, HubspotInputFormatProvider.class); + return Plugin.createBatch( + pluginClass, HubspotInputFormat.class, HubspotInputFormatProvider.class); } else if (pluginClass.equals(ZendeskBatchSource.class)) { - return Plugin.create(pluginClass, ZendeskInputFormat.class, ZendeskInputFormatProvider.class); + return Plugin.createBatch( + pluginClass, ZendeskInputFormat.class, ZendeskInputFormatProvider.class); } else if (pluginClass.equals(HubspotBatchSink.class)) { - return Plugin.create(pluginClass, HubspotOutputFormat.class, SourceInputFormatProvider.class); + return Plugin.createBatch( + pluginClass, HubspotOutputFormat.class, SourceInputFormatProvider.class); } else if (pluginClass.equals(ServiceNowSource.class)) { - return Plugin.create( + return Plugin.createBatch( pluginClass, ServiceNowInputFormat.class, SourceInputFormatProvider.class); + } else if (pluginClass.equals(HubspotStreamingSource.class)) { + return Plugin.createStreaming(pluginClass); + } + throw new UnsupportedOperationException( + String.format("Given plugin class '%s' is not supported!", pluginClass.getName())); + } + + /** Gets a {@link ReceiverBuilder} by CDAP {@link Plugin} class. */ + @SuppressWarnings("unchecked") + static ReceiverBuilder> getReceiverBuilderByPluginClass( + Class pluginClass, PluginConfig pluginConfig, Class valueClass) { + checkArgument(pluginClass != null, "Plugin class can not be null!"); + checkArgument(pluginConfig != null, "Plugin config can not be null!"); + checkArgument(valueClass != null, "Value class can not be null!"); + if (pluginClass.equals(HubspotStreamingSource.class) && String.class.equals(valueClass)) { + ReceiverBuilder> receiverBuilder = + new ReceiverBuilder<>(HubspotReceiver.class).withConstructorArgs(pluginConfig); + return (ReceiverBuilder>) receiverBuilder; + } + if (REGISTERED_PLUGINS.containsKey(pluginClass)) { + return (ReceiverBuilder>) + REGISTERED_PLUGINS.get(pluginClass).getRight(); + } + throw new UnsupportedOperationException( + String.format("Given plugin class '%s' is not supported!", pluginClass.getName())); + } + + /** + * Register new CDAP Streaming {@link Plugin} class providing corresponding {@param getOffsetFn} + * and {@param receiverBuilder} params. + */ + public static void registerStreamingPlugin( + Class pluginClass, + SerializableFunction getOffsetFn, + ReceiverBuilder> receiverBuilder) { + REGISTERED_PLUGINS.put(pluginClass, new ImmutablePair<>(getOffsetFn, receiverBuilder)); + } + + private static SerializableFunction getOffsetFnForHubspot() { + return input -> { + if (input != null) { + try { + HashMap json = + GSON.fromJson(input, new TypeToken>() {}.getType()); + checkArgumentNotNull(json, "Can not get JSON from Hubspot input string"); + Object id = json.get(HUBSPOT_ID_FIELD); + checkArgumentNotNull(id, "Can not get ID from Hubspot input string"); + return ((Integer) id).longValue(); + } catch (Exception e) { + LOG.error("Can not get offset from json", e); + } + } + return 0L; + }; + } + + /** + * Gets a {@link SerializableFunction} that defines how to get record offset for CDAP {@link + * Plugin} class. 
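+ * For registered plugins this function is the one supplied at registration time. A
+ * registration mirroring the Employee test plugin from this patch looks like:
+ *
+ * <pre>{@code
+ * MappingUtils.registerStreamingPlugin(
+ *     EmployeeStreamingSource.class,
+ *     Long::valueOf,
+ *     new ReceiverBuilder<>(EmployeeReceiver.class).withConstructorArgs(pluginConfig));
+ * }</pre>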
+ */ + @SuppressWarnings("unchecked") + static SerializableFunction getOffsetFnForPluginClass( + Class pluginClass, Class valueClass) { + if (pluginClass.equals(HubspotStreamingSource.class) && String.class.equals(valueClass)) { + return (SerializableFunction) getOffsetFnForHubspot(); + } + if (REGISTERED_PLUGINS.containsKey(pluginClass)) { + return (SerializableFunction) REGISTERED_PLUGINS.get(pluginClass).getLeft(); } throw new UnsupportedOperationException( String.format("Given plugin class '%s' is not supported!", pluginClass.getName())); diff --git a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java index 31deb9d258db..6da476b56f3e 100644 --- a/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java +++ b/sdks/java/io/cdap/src/main/java/org/apache/beam/sdk/io/cdap/Plugin.java @@ -26,6 +26,7 @@ import io.cdap.cdap.etl.api.batch.BatchSinkContext; import io.cdap.cdap.etl.api.batch.BatchSource; import io.cdap.cdap.etl.api.batch.BatchSourceContext; +import io.cdap.cdap.etl.api.streaming.StreamingSource; import java.lang.annotation.Annotation; import java.lang.reflect.Constructor; import java.lang.reflect.Method; @@ -37,6 +38,7 @@ import org.apache.beam.sdk.io.cdap.context.BatchContextImpl; import org.apache.beam.sdk.io.cdap.context.BatchSinkContextImpl; import org.apache.beam.sdk.io.cdap.context.BatchSourceContextImpl; +import org.apache.beam.sdk.io.cdap.context.StreamingSourceContextImpl; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.slf4j.Logger; @@ -49,6 +51,7 @@ public abstract class Plugin { private static final Logger LOG = LoggerFactory.getLogger(Plugin.class); private static final String PREPARE_RUN_METHOD_NAME = "prepareRun"; + private static final String GET_STREAM_METHOD_NAME = "getStream"; protected @Nullable PluginConfig pluginConfig; protected @Nullable Configuration hadoopConfiguration; @@ -61,10 +64,10 @@ public abstract class Plugin { public abstract Class getPluginClass(); /** Gets InputFormat or OutputFormat class for a plugin. */ - public abstract Class getFormatClass(); + public @Nullable abstract Class getFormatClass(); /** Gets InputFormatProvider or OutputFormatProvider class for a plugin. */ - public abstract Class getFormatProviderClass(); + public @Nullable abstract Class getFormatProviderClass(); /** Sets a plugin config. */ public Plugin withConfig(PluginConfig pluginConfig) { @@ -83,46 +86,57 @@ public Plugin withConfig(PluginConfig pluginConfig) { * validating connection to the CDAP sink/source and performing initial tuning. 
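+ * For unbounded (streaming) plugins this method returns immediately: the Hadoop format
+ * configuration performed here only applies to the batch path, while the streaming path is
+ * driven by {@code SparkReceiverIO} inside {@code CdapIO.Read#expand}.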
*/ public void prepareRun() { - PluginConfig pluginConfig = getPluginConfig(); - checkStateNotNull(pluginConfig, "PluginConfig should be not null!"); + if (isUnbounded()) { + // Not needed for unbounded plugins + return; + } if (cdapPluginObj == null) { - try { - Constructor constructor = - getPluginClass().getDeclaredConstructor(pluginConfig.getClass()); - constructor.setAccessible(true); - cdapPluginObj = (SubmitterLifecycle) constructor.newInstance(pluginConfig); - } catch (Exception e) { - LOG.error("Can not instantiate CDAP plugin class", e); - throw new IllegalStateException("Can not call prepareRun"); - } + instantiateCdapPluginObj(); } + checkStateNotNull(cdapPluginObj, "Cdap Plugin object can't be null!"); try { cdapPluginObj.prepareRun(getContext()); - if (getPluginType().equals(PluginConstants.PluginType.SOURCE)) { - for (Map.Entry entry : - getContext().getInputFormatProvider().getInputFormatConfiguration().entrySet()) { - getHadoopConfiguration().set(entry.getKey(), entry.getValue()); - } - } else { - for (Map.Entry entry : - getContext().getOutputFormatProvider().getOutputFormatConfiguration().entrySet()) { - getHadoopConfiguration().set(entry.getKey(), entry.getValue()); - } - getHadoopConfiguration().set(MRJobConfig.ID, String.valueOf(1)); - } } catch (Exception e) { LOG.error("Error while prepareRun", e); throw new IllegalStateException("Error while prepareRun"); } + if (getPluginType().equals(PluginConstants.PluginType.SOURCE)) { + for (Map.Entry entry : + getContext().getInputFormatProvider().getInputFormatConfiguration().entrySet()) { + getHadoopConfiguration().set(entry.getKey(), entry.getValue()); + } + } else { + for (Map.Entry entry : + getContext().getOutputFormatProvider().getOutputFormatConfiguration().entrySet()) { + getHadoopConfiguration().set(entry.getKey(), entry.getValue()); + } + getHadoopConfiguration().set(MRJobConfig.ID, String.valueOf(1)); + } + } + + /** Creates an instance of {@link #cdapPluginObj} using {@link #pluginConfig}. */ + private void instantiateCdapPluginObj() { + PluginConfig pluginConfig = getPluginConfig(); + checkStateNotNull(pluginConfig, "PluginConfig should be not null!"); + try { + Constructor constructor = getPluginClass().getDeclaredConstructor(pluginConfig.getClass()); + constructor.setAccessible(true); + cdapPluginObj = (SubmitterLifecycle) constructor.newInstance(pluginConfig); + } catch (Exception e) { + LOG.error("Can not instantiate CDAP plugin class", e); + throw new IllegalStateException("Can not call prepareRun"); + } } /** Sets a plugin Hadoop configuration. */ public Plugin withHadoopConfiguration(Class formatKeyClass, Class formatValueClass) { + Class formatClass = getFormatClass(); + checkStateNotNull(formatClass, "Format class can't be null!"); PluginConstants.Format formatType = getFormatType(); PluginConstants.Hadoop hadoopType = getHadoopType(); getHadoopConfiguration() - .setClass(hadoopType.getFormatClass(), getFormatClass(), formatType.getFormatClass()); + .setClass(hadoopType.getFormatClass(), formatClass, formatType.getFormatClass()); getHadoopConfiguration().setClass(hadoopType.getKeyClass(), formatKeyClass, Object.class); getHadoopConfiguration().setClass(hadoopType.getValueClass(), formatValueClass, Object.class); @@ -163,7 +177,8 @@ private PluginConstants.Hadoop getHadoopType() { /** Gets value of a plugin type. 
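+ * Both {@link StreamingSource} and {@link BatchSource} subclasses resolve to {@code
+ * PluginConstants.PluginType.SOURCE}, and {@link BatchSink} subclasses to {@code SINK}; any
+ * other class is rejected with an {@link IllegalArgumentException}.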
*/ public static PluginConstants.PluginType initPluginType(Class pluginClass) throws IllegalArgumentException { - if (BatchSource.class.isAssignableFrom(pluginClass)) { + if (StreamingSource.class.isAssignableFrom(pluginClass) + || BatchSource.class.isAssignableFrom(pluginClass)) { return PluginConstants.PluginType.SOURCE; } else if (BatchSink.class.isAssignableFrom(pluginClass)) { return PluginConstants.PluginType.SINK; @@ -188,6 +203,8 @@ public static BatchContextImpl initContext(Class cdapPluginClass) { } else if (contextClass.equals(BatchSinkContext.class)) { return new BatchSinkContextImpl(); } + } else if (method.getName().equals(GET_STREAM_METHOD_NAME)) { + return new StreamingSourceContextImpl(); } } throw new IllegalStateException("Cannot determine context class"); @@ -209,8 +226,8 @@ public Boolean isUnbounded() { return isUnbounded; } - /** Creates a plugin instance. */ - public static Plugin create( + /** Creates a batch plugin instance. */ + public static Plugin createBatch( Class newPluginClass, Class newFormatClass, Class newFormatProviderClass) { return builder() .setPluginClass(newPluginClass) @@ -221,6 +238,15 @@ public static Plugin create( .build(); } + /** Creates a streaming plugin instance. */ + public static Plugin createStreaming(Class newPluginClass) { + return builder() + .setPluginClass(newPluginClass) + .setPluginType(Plugin.initPluginType(newPluginClass)) + .setContext(Plugin.initContext(newPluginClass)) + .build(); + } + /** Creates a plugin builder instance. */ public static Builder builder() { return new AutoValue_Plugin.Builder(); diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java index bb5f205fc517..8f2a987a5cda 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOIT.java @@ -162,7 +162,8 @@ private CdapIO.Write writeToDB(Mapwrite() .withCdapPlugin( - Plugin.create(DBBatchSink.class, DBOutputFormat.class, DBOutputFormatProvider.class)) + Plugin.createBatch( + DBBatchSink.class, DBOutputFormat.class, DBOutputFormatProvider.class)) .withPluginConfig(pluginConfig) .withKeyClass(TestRowDBWritable.class) .withValueClass(NullWritable.class) @@ -174,7 +175,8 @@ private CdapIO.Read readFromDB(Mapread() .withCdapPlugin( - Plugin.create(DBBatchSource.class, DBInputFormat.class, DBInputFormatProvider.class)) + Plugin.createBatch( + DBBatchSource.class, DBInputFormat.class, DBInputFormatProvider.class)) .withPluginConfig(pluginConfig) .withKeyClass(LongWritable.class) .withValueClass(TestRowDBWritable.class); diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java index e978f5b8fcad..e18126e69acf 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/CdapIOTest.java @@ -27,18 +27,34 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import org.apache.beam.runners.direct.DirectOptions; +import org.apache.beam.runners.direct.DirectRunner; +import org.apache.beam.sdk.Pipeline; import org.apache.beam.sdk.coders.KvCoder; +import org.apache.beam.sdk.coders.NullableCoder; import org.apache.beam.sdk.coders.StringUtf8Coder; +import org.apache.beam.sdk.io.cdap.batch.EmployeeBatchSink; +import 
org.apache.beam.sdk.io.cdap.batch.EmployeeBatchSource; +import org.apache.beam.sdk.io.cdap.batch.EmployeeInputFormat; +import org.apache.beam.sdk.io.cdap.batch.EmployeeInputFormatProvider; +import org.apache.beam.sdk.io.cdap.batch.EmployeeOutputFormat; +import org.apache.beam.sdk.io.cdap.batch.EmployeeOutputFormatProvider; import org.apache.beam.sdk.io.cdap.context.BatchSinkContextImpl; import org.apache.beam.sdk.io.cdap.context.BatchSourceContextImpl; +import org.apache.beam.sdk.io.cdap.streaming.EmployeeReceiver; +import org.apache.beam.sdk.io.cdap.streaming.EmployeeStreamingSource; +import org.apache.beam.sdk.io.sparkreceiver.ReceiverBuilder; +import org.apache.beam.sdk.options.PipelineOptionsFactory; import org.apache.beam.sdk.testing.PAssert; import org.apache.beam.sdk.testing.TestPipeline; import org.apache.beam.sdk.transforms.Create; +import org.apache.beam.sdk.transforms.Values; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PBegin; import org.apache.beam.sdk.values.PCollection; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap; import org.apache.hadoop.mapreduce.OutputCommitter; +import org.joda.time.Duration; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -74,7 +90,7 @@ public void testReadBuildsCorrectly() { CdapIO.Read read = CdapIO.read() .withCdapPlugin( - Plugin.create( + Plugin.createBatch( EmployeeBatchSource.class, EmployeeInputFormat.class, EmployeeInputFormatProvider.class)) @@ -125,7 +141,7 @@ public void testReadObjectCreationFailsIfValueClassIsNull() { public void testReadExpandingFailsMissingCdapPluginClass() { PBegin testPBegin = PBegin.in(TestPipeline.create()); CdapIO.Read read = CdapIO.read(); - assertThrows(IllegalArgumentException.class, () -> read.expand(testPBegin)); + assertThrows(IllegalStateException.class, () -> read.expand(testPBegin)); } @Test @@ -136,13 +152,13 @@ public void testReadObjectCreationFailsIfCdapPluginClassIsNotSupported() { } @Test - public void testReadingData() { + public void testReadFromCdapBatchPlugin() { EmployeeConfig pluginConfig = new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build(); CdapIO.Read read = CdapIO.read() .withCdapPlugin( - Plugin.create( + Plugin.createBatch( EmployeeBatchSource.class, EmployeeInputFormat.class, EmployeeInputFormatProvider.class)) @@ -154,11 +170,43 @@ public void testReadingData() { for (int i = 1; i < EmployeeInputFormat.NUM_OF_TEST_EMPLOYEE_RECORDS; i++) { expected.add(KV.of(String.valueOf(i), EmployeeInputFormat.EMPLOYEE_NAME_PREFIX + i)); } - PCollection> actual = p.apply("ReadTest", read); + PCollection> actual = p.apply("ReadBatchTest", read); PAssert.that(actual).containsInAnyOrder(expected); p.run(); } + @Test + public void testReadFromCdapStreamingPlugin() { + DirectOptions options = PipelineOptionsFactory.as(DirectOptions.class); + options.setBlockOnRun(false); + options.setRunner(DirectRunner.class); + Pipeline p = Pipeline.create(options); + + EmployeeConfig pluginConfig = + new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build(); + MappingUtils.registerStreamingPlugin( + EmployeeStreamingSource.class, + Long::valueOf, + new ReceiverBuilder<>(EmployeeReceiver.class).withConstructorArgs(pluginConfig)); + + CdapIO.Read read = + CdapIO.read() + .withCdapPlugin(Plugin.createStreaming(EmployeeStreamingSource.class)) + .withPluginConfig(pluginConfig) + .withKeyClass(String.class) + .withValueClass(String.class); + + List storedRecords = 
EmployeeReceiver.getStoredRecords(); + + PCollection actual = + p.apply("ReadStreamingTest", read) + .setCoder(KvCoder.of(NullableCoder.of(StringUtf8Coder.of()), StringUtf8Coder.of())) + .apply(Values.create()); + + PAssert.that(actual).containsInAnyOrder(storedRecords); + p.run().waitUntilFinish(Duration.standardSeconds(15)); + } + @Test public void testWriteBuildsCorrectly() { EmployeeConfig pluginConfig = @@ -167,7 +215,7 @@ public void testWriteBuildsCorrectly() { CdapIO.Write write = CdapIO.write() .withCdapPlugin( - Plugin.create( + Plugin.createBatch( EmployeeBatchSink.class, EmployeeOutputFormat.class, EmployeeOutputFormatProvider.class)) @@ -230,7 +278,7 @@ public void testWriteExpandingFailsMissingCdapPluginClass() { PCollection> testPCollection = Create.empty(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())).expand(testPBegin); CdapIO.Write write = CdapIO.write(); - assertThrows(IllegalArgumentException.class, () -> write.expand(testPCollection)); + assertThrows(IllegalStateException.class, () -> write.expand(testPCollection)); } @Test @@ -241,7 +289,7 @@ public void testWriteObjectCreationFailsIfCdapPluginClassIsNotSupported() { } @Test - public void testWritingData() throws IOException { + public void testWriteWithCdapBatchSinkPlugin() throws IOException { List> data = new ArrayList<>(); for (int i = 0; i < EmployeeInputFormat.NUM_OF_TEST_EMPLOYEE_RECORDS; i++) { data.add(KV.of(String.valueOf(i), EmployeeInputFormat.EMPLOYEE_NAME_PREFIX + i)); @@ -254,10 +302,10 @@ public void testWritingData() throws IOException { "Write", CdapIO.write() .withCdapPlugin( - Plugin.create( + Plugin.createBatch( EmployeeBatchSink.class, EmployeeOutputFormat.class, - EmployeeInputFormatProvider.class)) + EmployeeOutputFormatProvider.class)) .withPluginConfig(pluginConfig) .withKeyClass(String.class) .withValueClass(String.class) diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeConfig.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeConfig.java index d02f4548cd3a..547af887fc5d 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeConfig.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeConfig.java @@ -25,10 +25,13 @@ import io.cdap.plugin.common.ReferencePluginConfig; import java.util.HashSet; import java.util.Set; +import org.apache.beam.sdk.io.cdap.batch.EmployeeBatchSink; +import org.apache.beam.sdk.io.cdap.batch.EmployeeBatchSource; /** * {@link io.cdap.cdap.api.plugin.PluginConfig} for {@link EmployeeBatchSource} and {@link - * EmployeeBatchSink} CDAP plugins. Used to test {@link CdapIO#read()} and {@link CdapIO#write()}. + * EmployeeBatchSink} CDAP plugins. Used to test {@link org.apache.beam.sdk.io.cdap.CdapIO#read()} + * and {@link org.apache.beam.sdk.io.cdap.CdapIO#write()}. 
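+ * After this patch it also backs the streaming {@link
+ * org.apache.beam.sdk.io.cdap.streaming.EmployeeStreamingSource} test plugin, whose constructor
+ * takes an {@link EmployeeConfig}.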
*/ public class EmployeeConfig extends ReferencePluginConfig { diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/PluginTest.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/PluginTest.java index 501c91b6cdaf..2fcfe6f36c0b 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/PluginTest.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/PluginTest.java @@ -65,7 +65,7 @@ public class PluginTest { public void testBuildingSourcePluginWithCDAPClasses() { try { Plugin serviceNowSourcePlugin = - Plugin.create( + Plugin.createBatch( ServiceNowSource.class, ServiceNowInputFormat.class, SourceInputFormatProvider.class) @@ -93,7 +93,7 @@ public void testBuildingSourcePluginWithCDAPClasses() { @Test public void testSettingPluginType() { Plugin serviceNowSourcePlugin = - Plugin.create( + Plugin.createBatch( ServiceNowSource.class, ServiceNowInputFormat.class, SourceInputFormatProvider.class) @@ -108,7 +108,7 @@ public void testSettingPluginType() { public void testSettingPluginTypeFailed() { try { Plugin serviceNowSourcePlugin = - Plugin.create(Object.class, Object.class, Object.class) + Plugin.createBatch(Object.class, Object.class, Object.class) .withConfig(serviceNowSourceConfig) .withHadoopConfiguration(Schema.class, MapWritable.class); fail("This should have thrown an exception"); diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeBatchSink.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeBatchSink.java similarity index 95% rename from sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeBatchSink.java rename to sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeBatchSink.java index 1e0b835fac77..052d9ab0f6a8 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeBatchSink.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeBatchSink.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.beam.sdk.io.cdap; +package org.apache.beam.sdk.io.cdap.batch; import io.cdap.cdap.api.annotation.Description; import io.cdap.cdap.api.annotation.Name; @@ -28,6 +28,8 @@ import io.cdap.cdap.etl.api.PipelineConfigurer; import io.cdap.cdap.etl.api.batch.BatchSink; import io.cdap.cdap.etl.api.batch.BatchSinkContext; +import org.apache.beam.sdk.io.cdap.CdapIO; +import org.apache.beam.sdk.io.cdap.EmployeeConfig; /** Imitation of CDAP {@link BatchSink} plugin. Used to test {@link CdapIO#write()}. */ @Plugin(type = BatchSink.PLUGIN_TYPE) diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeBatchSource.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeBatchSource.java similarity index 94% rename from sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeBatchSource.java rename to sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeBatchSource.java index 27494c8ce9c8..3daf2fb69b98 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeBatchSource.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeBatchSource.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.beam.sdk.io.cdap; +package org.apache.beam.sdk.io.cdap.batch; import io.cdap.cdap.api.annotation.Description; import io.cdap.cdap.api.annotation.Name; @@ -32,6 +32,8 @@ import io.cdap.plugin.common.IdUtils; import io.cdap.plugin.common.LineageRecorder; import java.util.stream.Collectors; +import org.apache.beam.sdk.io.cdap.CdapIO; +import org.apache.beam.sdk.io.cdap.EmployeeConfig; /** Imitation of CDAP {@link BatchSource} plugin. Used to test {@link CdapIO#read()}. */ @Plugin(type = BatchSource.PLUGIN_TYPE) @@ -41,7 +43,7 @@ public class EmployeeBatchSource extends BatchSource()); } - static List> getWrittenOutput() { + public static List> getWrittenOutput() { return output; } - static OutputCommitter getOutputCommitter() { + public static OutputCommitter getOutputCommitter() { return outputCommitter; } } diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeOutputFormatProvider.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeOutputFormatProvider.java similarity index 93% rename from sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeOutputFormatProvider.java rename to sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeOutputFormatProvider.java index 826b3177d302..a42c0c89aca1 100644 --- a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/EmployeeOutputFormatProvider.java +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/batch/EmployeeOutputFormatProvider.java @@ -15,12 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.beam.sdk.io.cdap; +package org.apache.beam.sdk.io.cdap.batch; import com.google.gson.Gson; import com.google.gson.GsonBuilder; import io.cdap.cdap.api.data.batch.OutputFormatProvider; import java.util.Map; +import org.apache.beam.sdk.io.cdap.CdapIO; +import org.apache.beam.sdk.io.cdap.EmployeeConfig; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap; /** diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeReceiver.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeReceiver.java new file mode 100644 index 000000000000..fcd0fa7b8d76 --- /dev/null +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeReceiver.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.beam.sdk.io.cdap.streaming; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.apache.beam.sdk.io.cdap.EmployeeConfig; +import org.apache.beam.sdk.io.sparkreceiver.HasOffset; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.spark.storage.StorageLevel; +import org.apache.spark.streaming.receiver.Receiver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Imitation of Spark {@link Receiver} for {@link EmployeeStreamingSource} CDAP plugin. Used to test + * {@link org.apache.beam.sdk.io.cdap.CdapIO#read()}. + */ +public class EmployeeReceiver extends Receiver implements HasOffset { + + public static final int RECORDS_COUNT = 20; + + private static final Logger LOG = LoggerFactory.getLogger(EmployeeReceiver.class); + private static final int TIMEOUT_MS = 500; + private static final List STORED_RECORDS = new ArrayList<>(); + private final EmployeeConfig config; + private Long startOffset; + + EmployeeReceiver(EmployeeConfig config) { + super(StorageLevel.MEMORY_AND_DISK_2()); + this.config = config; + LOG.info("Created EmployeeReceiver with objectType = {}", this.config.objectType); + } + + @Override + public void setStartOffset(Long startOffset) { + if (startOffset != null) { + this.startOffset = startOffset; + } + } + + @Override + @SuppressWarnings("FutureReturnValueIgnored") + public void onStart() { + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().build()).submit(this::receive); + } + + @Override + public void onStop() {} + + @Override + public Long getEndOffset() { + return Long.MAX_VALUE; + } + + private void receive() { + Long currentOffset = startOffset; + while (!isStopped()) { + if (currentOffset <= RECORDS_COUNT) { + STORED_RECORDS.add(currentOffset.toString()); + store((currentOffset++).toString()); + } + try { + TimeUnit.MILLISECONDS.sleep(TIMEOUT_MS); + } catch (InterruptedException e) { + LOG.error("Interrupted", e); + } + } + } + + public static List getStoredRecords() { + return STORED_RECORDS; + } +} diff --git a/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeStreamingSource.java b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeStreamingSource.java new file mode 100644 index 000000000000..e73688e97725 --- /dev/null +++ b/sdks/java/io/cdap/src/test/java/org/apache/beam/sdk/io/cdap/streaming/EmployeeStreamingSource.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.beam.sdk.io.cdap.streaming; + +import io.cdap.cdap.api.annotation.Description; +import io.cdap.cdap.api.annotation.Name; +import io.cdap.cdap.api.annotation.Plugin; +import io.cdap.cdap.api.data.format.StructuredRecord; +import io.cdap.cdap.etl.api.FailureCollector; +import io.cdap.cdap.etl.api.PipelineConfigurer; +import io.cdap.cdap.etl.api.streaming.StreamingContext; +import io.cdap.cdap.etl.api.streaming.StreamingSource; +import java.io.IOException; +import org.apache.beam.sdk.io.cdap.CdapIO; +import org.apache.beam.sdk.io.cdap.EmployeeConfig; +import org.apache.spark.streaming.api.java.JavaDStream; +import org.apache.spark.streaming.api.java.JavaStreamingContext; + +/** Imitation of CDAP {@link StreamingSource} plugin. Used to test {@link CdapIO#read()}. */ +@Plugin(type = StreamingSource.PLUGIN_TYPE) +@Name(EmployeeStreamingSource.NAME) +@Description("Plugin reads Employee in streaming") +public class EmployeeStreamingSource extends StreamingSource { + + public static final String NAME = "EmployeeStreamingSource"; + + private final EmployeeConfig config; + + public EmployeeStreamingSource(EmployeeConfig config) { + this.config = config; + } + + @Override + public void configurePipeline(PipelineConfigurer pipelineConfigurer) { + FailureCollector collector = pipelineConfigurer.getStageConfigurer().getFailureCollector(); + config.validate(collector); // validate when macros are not substituted + collector.getOrThrowException(); + + pipelineConfigurer.getStageConfigurer().setOutputSchema(config.getSchema()); + } + + @Override + public JavaDStream getStream(StreamingContext streamingContext) + throws IOException { + FailureCollector collector = streamingContext.getFailureCollector(); + config.validate(collector); // validate when macros are substituted + collector.getOrThrowException(); + + JavaStreamingContext jssc = streamingContext.getSparkStreamingContext(); + + return jssc.receiverStream(new EmployeeReceiver(config)) + .map(jsonString -> transform(jsonString, config)); + } + + public static StructuredRecord transform(String value, EmployeeConfig config) { + StructuredRecord.Builder builder = StructuredRecord.builder(config.getSchema()); + builder.set("id", value); + builder.set("name", "Employee " + value); + return builder.build(); + } +} diff --git a/sdks/java/io/sparkreceiver/build.gradle b/sdks/java/io/sparkreceiver/build.gradle index 8d4b96f298cd..fb1f681d9265 100644 --- a/sdks/java/io/sparkreceiver/build.gradle +++ b/sdks/java/io/sparkreceiver/build.gradle @@ -47,7 +47,6 @@ dependencies { implementation library.java.vendored_guava_26_0_jre implementation project(path: ":sdks:java:core", configuration: "shadow") compileOnly "org.scala-lang:scala-library:2.11.12" - testImplementation project(path: ":sdks:java:io:cdap", configuration: "testRuntimeMigration") testImplementation library.java.junit testImplementation project(path: ":runners:direct-java", configuration: "shadow") testImplementation project(path: ":examples:java", configuration: "testRuntimeMigration") diff --git a/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java b/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java index c51a5168ce39..2eb46561e2cf 100644 --- a/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java +++ 
b/sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java @@ -19,6 +19,7 @@ import static org.apache.beam.sdk.util.Preconditions.checkStateNotNull; +import java.nio.ByteBuffer; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; @@ -33,12 +34,15 @@ import org.apache.beam.sdk.transforms.splittabledofn.WatermarkEstimator; import org.apache.beam.sdk.transforms.splittabledofn.WatermarkEstimators; import org.apache.beam.sdk.transforms.windowing.BoundedWindow; +import org.apache.commons.lang3.SerializationUtils; import org.apache.spark.SparkConf; import org.apache.spark.streaming.receiver.Receiver; import org.checkerframework.checker.nullness.qual.Nullable; import org.joda.time.Instant; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import scala.collection.Iterator; +import scala.collection.mutable.ArrayBuffer; /** * A SplittableDoFn which reads from {@link Receiver} that implements {@link HasOffset}. By default, @@ -141,15 +145,46 @@ public boolean hasRecords() { @Override public void start(Receiver sparkReceiver) { this.sparkReceiver = sparkReceiver; - try { - new WrappedSupervisor( - sparkReceiver, - new SparkConf(), - objects -> { - V record = (V) objects[0]; - recordsQueue.offer(record); + + final SerializableFunction storeFn = + (input) -> { + if (input == null) { return null; - }); + } + /* + Use only [0] element - data. + The other elements are not needed because they are related to Spark environment options. + */ + Object data = input[0]; + + if (data instanceof ByteBuffer) { + final ByteBuffer byteBuffer = ((ByteBuffer) data).asReadOnlyBuffer(); + final byte[] bytes = new byte[byteBuffer.limit()]; + byteBuffer.get(bytes); + final V record = SerializationUtils.deserialize(bytes); + recordsQueue.offer(record); + } else if (data instanceof Iterator) { + final Iterator iterator = (Iterator) data; + while (iterator.hasNext()) { + V record = iterator.next(); + recordsQueue.offer(record); + } + } else if (data instanceof ArrayBuffer) { + final ArrayBuffer arrayBuffer = (ArrayBuffer) data; + final Iterator iterator = arrayBuffer.iterator(); + while (iterator.hasNext()) { + V record = iterator.next(); + recordsQueue.offer(record); + } + } else { + V record = (V) data; + recordsQueue.offer(record); + } + return null; + }; + + try { + new WrappedSupervisor(sparkReceiver, new SparkConf(), storeFn); } catch (Exception e) { LOG.error("Can not init Spark Receiver!", e); throw new IllegalStateException("Spark Receiver was not initialized"); @@ -202,6 +237,7 @@ public ProcessContinuation processElement( } Instant currentTimeStamp = getTimestampFn.apply(record); ((ManualWatermarkEstimator) watermarkEstimator).setWatermark(currentTimeStamp); + System.err.println(record); receiver.outputWithTimestamp(record, currentTimeStamp); } } diff --git a/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ArrayBufferDataReceiver.java b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ArrayBufferDataReceiver.java new file mode 100644 index 000000000000..849ea0a1373e --- /dev/null +++ b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ArrayBufferDataReceiver.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.beam.sdk.io.sparkreceiver; + +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.spark.storage.StorageLevel; +import org.apache.spark.streaming.receiver.Receiver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import scala.collection.mutable.ArrayBuffer; + +/** + * Imitation of Spark {@link Receiver} that implements {@link HasOffset} interface and pushes data + * passing the {@link ArrayBuffer}. Used to test {@link SparkReceiverIO#read()}. + */ +public class ArrayBufferDataReceiver extends Receiver implements HasOffset { + + private static final Logger LOG = LoggerFactory.getLogger(ArrayBufferDataReceiver.class); + private static final int TIMEOUT_MS = 500; + public static final int RECORDS_COUNT = 20; + + private Long startOffset; + + ArrayBufferDataReceiver() { + super(StorageLevel.MEMORY_AND_DISK_2()); + } + + @Override + public void setStartOffset(Long startOffset) { + if (startOffset != null) { + this.startOffset = startOffset; + } + } + + @Override + @SuppressWarnings("FutureReturnValueIgnored") + public void onStart() { + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().build()).submit(this::receive); + } + + @Override + public void onStop() {} + + @Override + public Long getEndOffset() { + return Long.MAX_VALUE; + } + + private void receive() { + Long currentOffset = startOffset; + while (!isStopped()) { + if (currentOffset < RECORDS_COUNT) { + ArrayBuffer dataArray = new ArrayBuffer<>(); + for (int i = 0; i < Math.max(2, RECORDS_COUNT / 10); i++) { + dataArray.$plus$eq(String.valueOf(currentOffset++)); + } + store(dataArray); + } else { + break; + } + try { + TimeUnit.MILLISECONDS.sleep(TIMEOUT_MS); + } catch (InterruptedException e) { + LOG.error("Interrupted", e); + } + } + } +} diff --git a/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ByteBufferDataReceiver.java b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ByteBufferDataReceiver.java new file mode 100644 index 000000000000..dcef495aa67a --- /dev/null +++ b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/ByteBufferDataReceiver.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.sparkreceiver;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.lang3.SerializationUtils;
+import org.apache.spark.storage.StorageLevel;
+import org.apache.spark.streaming.receiver.Receiver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Imitation of a Spark {@link Receiver} that implements the {@link HasOffset} interface and pushes
+ * data wrapped in a {@link ByteBuffer}. Used to test {@link SparkReceiverIO#read()}.
+ */
+public class ByteBufferDataReceiver extends Receiver<String> implements HasOffset {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ByteBufferDataReceiver.class);
+  private static final int TIMEOUT_MS = 500;
+  public static final int RECORDS_COUNT = 20;
+
+  private Long startOffset;
+
+  ByteBufferDataReceiver() {
+    super(StorageLevel.MEMORY_AND_DISK_2());
+  }
+
+  @Override
+  public void setStartOffset(Long startOffset) {
+    if (startOffset != null) {
+      this.startOffset = startOffset;
+    }
+  }
+
+  @Override
+  @SuppressWarnings("FutureReturnValueIgnored")
+  public void onStart() {
+    Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().build()).submit(this::receive);
+  }
+
+  @Override
+  public void onStop() {}
+
+  @Override
+  public Long getEndOffset() {
+    return Long.MAX_VALUE;
+  }
+
+  private void receive() {
+    Long currentOffset = startOffset;
+    while (!isStopped()) {
+      if (currentOffset < RECORDS_COUNT) {
+        ByteBuffer dataBuffer =
+            ByteBuffer.wrap(SerializationUtils.serialize(String.valueOf(currentOffset++)));
+        store(dataBuffer);
+      } else {
+        break;
+      }
+      try {
+        TimeUnit.MILLISECONDS.sleep(TIMEOUT_MS);
+      } catch (InterruptedException e) {
+        LOG.error("Interrupted", e);
+      }
+    }
+  }
+}
diff --git a/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/IteratorDataReceiver.java b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/IteratorDataReceiver.java
new file mode 100644
index 000000000000..8999802542c2
--- /dev/null
+++ b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/IteratorDataReceiver.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.sparkreceiver;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.spark.storage.StorageLevel;
+import org.apache.spark.streaming.receiver.Receiver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Imitation of a Spark {@link Receiver} that implements the {@link HasOffset} interface and pushes
+ * data through an {@link Iterator}. Used to test {@link SparkReceiverIO#read()}.
+ */
+public class IteratorDataReceiver extends Receiver<String> implements HasOffset {
+
+  private static final Logger LOG = LoggerFactory.getLogger(IteratorDataReceiver.class);
+  private static final int TIMEOUT_MS = 500;
+  public static final int RECORDS_COUNT = 20;
+
+  private Long startOffset;
+
+  IteratorDataReceiver() {
+    super(StorageLevel.MEMORY_AND_DISK_2());
+  }
+
+  @Override
+  public void setStartOffset(Long startOffset) {
+    if (startOffset != null) {
+      this.startOffset = startOffset;
+    }
+  }
+
+  @Override
+  @SuppressWarnings("FutureReturnValueIgnored")
+  public void onStart() {
+    Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().build()).submit(this::receive);
+  }
+
+  @Override
+  public void onStop() {}
+
+  @Override
+  public Long getEndOffset() {
+    return Long.MAX_VALUE;
+  }
+
+  private void receive() {
+    Long currentOffset = startOffset;
+    while (!isStopped()) {
+      if (currentOffset < RECORDS_COUNT) {
+        List<String> dataArray = new ArrayList<>();
+        for (int i = 0; i < Math.max(2, RECORDS_COUNT / 10); i++) {
+          dataArray.add(String.valueOf(currentOffset++));
+        }
+        store(dataArray.iterator());
+      } else {
+        break;
+      }
+      try {
+        TimeUnit.MILLISECONDS.sleep(TIMEOUT_MS);
+      } catch (InterruptedException e) {
+        LOG.error("Interrupted", e);
+      }
+    }
+  }
+}
diff --git a/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/SparkReceiverIOTest.java b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/SparkReceiverIOTest.java
index e81dca5150e5..6931e7199926 100644
--- a/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/SparkReceiverIOTest.java
+++ b/sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/SparkReceiverIOTest.java
@@ -20,14 +20,14 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThrows;
 
-import java.util.HashSet;
-import java.util.Set;
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.testing.PAssert;
 import org.apache.beam.sdk.testing.TestPipeline;
 import org.apache.beam.sdk.testing.TestPipelineOptions;
-import org.apache.beam.sdk.transforms.DoFn;
-import org.apache.beam.sdk.transforms.ParDo;
 import org.apache.beam.sdk.transforms.SerializableFunction;
+import org.apache.beam.sdk.values.PCollection;
 import org.joda.time.Duration;
 import org.joda.time.Instant;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -110,11 +110,13 @@ public void testReadFromCustomReceiverWithOffset() {
             .withTimestampFn(Instant::parse)
             .withSparkReceiverBuilder(receiverBuilder);
 
+    List<String> expected = new ArrayList<>();
     for (int i = 0; i < CustomReceiverWithOffset.RECORDS_COUNT; i++) {
-      TestOutputDoFn.EXPECTED_RECORDS.add(String.valueOf(i));
+      expected.add(String.valueOf(i));
     }
-    pipeline.apply(reader).setCoder(StringUtf8Coder.of()).apply(ParDo.of(new TestOutputDoFn()));
+    PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
+    PAssert.that(actual).containsInAnyOrder(expected);
 
     pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
   }
@@ -129,28 +131,73 @@ public void testReadFromCustomReceiverWithOffsetFailsAndReread() {
             .withTimestampFn(Instant::parse)
             .withSparkReceiverBuilder(receiverBuilder);
 
+    List<String> expected = new ArrayList<>();
     for (int i = 0; i < CustomReceiverWithOffset.RECORDS_COUNT; i++) {
-      TestOutputDoFn.EXPECTED_RECORDS.add(String.valueOf(i));
+      expected.add(String.valueOf(i));
     }
-    pipeline.apply(reader).setCoder(StringUtf8Coder.of()).apply(ParDo.of(new TestOutputDoFn()));
+    PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
+    PAssert.that(actual).containsInAnyOrder(expected);
 
     pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
+  }
+
+  @Test
+  public void testReadFromReceiverArrayBufferData() {
+    ReceiverBuilder<String, ? extends Receiver<String>> receiverBuilder =
+        new ReceiverBuilder<>(ArrayBufferDataReceiver.class).withConstructorArgs();
+    SparkReceiverIO.Read<String> reader =
+        SparkReceiverIO.<String>read()
+            .withGetOffsetFn(Long::valueOf)
+            .withTimestampFn(Instant::parse)
+            .withSparkReceiverBuilder(receiverBuilder);
+
+    List<String> expected = new ArrayList<>();
+    for (int i = 0; i < ArrayBufferDataReceiver.RECORDS_COUNT; i++) {
+      expected.add(String.valueOf(i));
+    }
+    PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
+
+    PAssert.that(actual).containsInAnyOrder(expected);
+    pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
+  }
+
+  @Test
+  public void testReadFromReceiverByteBufferData() {
+    ReceiverBuilder<String, ? extends Receiver<String>> receiverBuilder =
+        new ReceiverBuilder<>(ByteBufferDataReceiver.class).withConstructorArgs();
+    SparkReceiverIO.Read<String> reader =
+        SparkReceiverIO.<String>read()
+            .withGetOffsetFn(Long::valueOf)
+            .withTimestampFn(Instant::parse)
+            .withSparkReceiverBuilder(receiverBuilder);
 
-    assertEquals(0, TestOutputDoFn.EXPECTED_RECORDS.size());
+    List<String> expected = new ArrayList<>();
+    for (int i = 0; i < ByteBufferDataReceiver.RECORDS_COUNT; i++) {
+      expected.add(String.valueOf(i));
+    }
+    PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
+
+    PAssert.that(actual).containsInAnyOrder(expected);
+    pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
   }
 
-  /** {@link DoFn} that throws {@code RuntimeException} if receives unexpected element. */
-  private static class TestOutputDoFn extends DoFn<String, String> {
-    private static final Set<String> EXPECTED_RECORDS = new HashSet<>();
-
-    @ProcessElement
-    public void processElement(@Element String element, OutputReceiver<String> outputReceiver) {
-      if (!EXPECTED_RECORDS.contains(element)) {
-        throw new RuntimeException("Received unexpected element: " + element);
-      } else {
-        EXPECTED_RECORDS.remove(element);
-        outputReceiver.output(element);
-      }
+  @Test
+  public void testReadFromReceiverIteratorData() {
+    ReceiverBuilder<String, ? extends Receiver<String>> receiverBuilder =
+        new ReceiverBuilder<>(IteratorDataReceiver.class).withConstructorArgs();
+    SparkReceiverIO.Read<String> reader =
+        SparkReceiverIO.<String>read()
+            .withGetOffsetFn(Long::valueOf)
+            .withTimestampFn(Instant::parse)
+            .withSparkReceiverBuilder(receiverBuilder);
+
+    List<String> expected = new ArrayList<>();
+    for (int i = 0; i < IteratorDataReceiver.RECORDS_COUNT; i++) {
+      expected.add(String.valueOf(i));
     }
+    PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
+
+    PAssert.that(actual).containsInAnyOrder(expected);
+    pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
   }
 }
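The refactored tests above replace the stateful TestOutputDoFn with PAssert, which asserts on the materialized PCollection and needs no shared static state. For readers following along in the Python SDK, the analogous helper is apache_beam.testing.util.assert_that; a minimal sketch, illustrative only and not part of this patch:

    # Unordered equality check on a PCollection, mirroring
    # PAssert.that(actual).containsInAnyOrder(expected) in Java.
    import apache_beam as beam
    from apache_beam.testing.test_pipeline import TestPipeline
    from apache_beam.testing.util import assert_that, equal_to

    expected = [str(i) for i in range(20)]
    with TestPipeline() as p:
        actual = p | beam.Create(expected)
        assert_that(actual, equal_to(expected))  # equal_to compares as a multiset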
From 40b283b45ccadc786a57899f95012bed41cb25d8 Mon Sep 17 00:00:00 2001
From: Svetak Sundhar
Date: Fri, 21 Oct 2022 11:25:04 -0400
Subject: [PATCH 013/115] Avoid Circular imports related to bigquery_schema_tools (#23731)

* why rowcoder?
* registering datetime.datetime as schema type
* registering datetime.datetime as schema type
* registered type in schemas.py
* registered type in schemas.py
* convert Timestamp to datetime.datetime, which will then get converted into apache_beam.utils.timestamp.Timestamp
* experiment with converting to datetime.datetime
* Timestamp to datetime.datetime mapping
* Timestamp to datetime.datetime mapping
* Timestamp to datetime.datetime mapping
* np fix
* apache_beam_utils.timestamp.Timestamp obj
* apache_beam_utils.timestamp.Timestamp obj
* fixed tests
* Timestamp conversion
* Timestamp conversion with lint
* Timestamp conversion with lint
* fix
* fix
* avoid circular import
* avoid circular import
* lint fixes
---
 sdks/python/apache_beam/io/gcp/bigquery.py      |  5 +++--
 .../apache_beam/io/gcp/bigquery_schema_tools.py | 17 ++++++++++-------
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/sdks/python/apache_beam/io/gcp/bigquery.py b/sdks/python/apache_beam/io/gcp/bigquery.py
index bad20f69243f..7233326ce0c2 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery.py
@@ -369,6 +369,7 @@ def chain_after(result):
 from apache_beam.io.avroio import _create_avro_source as create_avro_source
 from apache_beam.io.filesystems import CompressionTypes
 from apache_beam.io.filesystems import FileSystems
+from apache_beam.io.gcp import bigquery_schema_tools
 from apache_beam.io.gcp import bigquery_tools
 from apache_beam.io.gcp.bigquery_io_metadata import create_bigquery_io_metadata
 from apache_beam.io.gcp.bigquery_read_internal import _BigQueryReadSplit
@@ -2471,9 +2472,9 @@ def _expand_output_type(self, output_pcollection):
       raise TypeError(
           '%s: table must be of type string'
           '; got a callable instead' % self.__class__.__name__)
-    return output_pcollection | beam.io.gcp.bigquery_schema_tools.\
+    return output_pcollection | bigquery_schema_tools.\
         convert_to_usertype(
-            beam.io.gcp.bigquery.bigquery_tools.BigQueryWrapper().get_table(
+            bigquery_tools.BigQueryWrapper().get_table(
                 project_id=table_details.projectId,
                 dataset_id=table_details.datasetId,
                 table_id=table_details.tableId).schema)
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py b/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py
index e78f7bd5a7f7..4c25aa62e0bd 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_schema_tools.py
@@ -28,9 +28,13 @@
 import numpy as np
 
 import apache_beam as beam
+import apache_beam.io.gcp.bigquery_tools
+import apache_beam.typehints.schemas
+import apache_beam.utils.proto_utils
 import apache_beam.utils.timestamp
 from apache_beam.io.gcp.internal.clients import bigquery
 from apache_beam.portability.api import schema_pb2
+from apache_beam.transforms import DoFn
 
 # BigQuery types as listed in
 # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
@@ -91,13 +95,11 @@ def bq_field_to_type(field, mode):
 
 
 def convert_to_usertype(table_schema):
-  usertype = beam.io.gcp.bigquery_schema_tools. \
-      generate_user_type_from_bq_schema(table_schema)
-  return beam.ParDo(
-      beam.io.gcp.bigquery_schema_tools.BeamSchemaConversionDoFn(usertype))
+  usertype = generate_user_type_from_bq_schema(table_schema)
+  return beam.ParDo(BeamSchemaConversionDoFn(usertype))
 
 
-class BeamSchemaConversionDoFn(beam.DoFn):
+class BeamSchemaConversionDoFn(DoFn):
   def __init__(self, pcoll_val_ctor):
     self._pcoll_val_ctor = pcoll_val_ctor
 
@@ -113,8 +115,9 @@ def infer_output_type(self, input_type):
   @classmethod
   def _from_serialized_schema(cls, schema_str):
     return cls(
-        beam.typehints.schemas.named_tuple_from_schema(
-            beam.utils.proto_utils.parse_Bytes(schema_str, schema_pb2.Schema)))
+        apache_beam.typehints.schemas.named_tuple_from_schema(
+            apache_beam.utils.proto_utils.parse_Bytes(
+                schema_str, schema_pb2.Schema)))
 
   def __reduce__(self):
     # when pickling, use bytes representation of the schema.
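The substantive change in this patch is import discipline: bigquery.py and bigquery_schema_tools.py now import their sibling modules directly and call them through the bound module name, rather than traversing attributes of the partially initialized apache_beam package at call time. A schematic sketch of the pattern; the helper call is taken from the diff above, and the failing variant is shown only as a comment:

    # Direct submodule import: the module object is bound once here, and
    # attributes on it are resolved lazily at call time.
    from apache_beam.io.gcp import bigquery_schema_tools
    from apache_beam.io.gcp import bigquery_tools

    def expand_output(table_schema):
        # Works even while apache_beam.io.gcp is still mid-import elsewhere.
        return bigquery_schema_tools.convert_to_usertype(table_schema)

    # By contrast, `import apache_beam as beam` followed by
    # `beam.io.gcp.bigquery_schema_tools.convert_to_usertype(...)` relies on
    # some earlier import having already attached the submodule to the
    # package object, which is exactly what broke in the circular-import case.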
From b48cf30c9022a8c8703e2f239507629a9439ef2f Mon Sep 17 00:00:00 2001
From: Yi Hu
Date: Fri, 21 Oct 2022 12:49:58 -0400
Subject: [PATCH 014/115] Use Flink 1.13 for load tests (#23767)

---
 .test-infra/jenkins/Flink.groovy                              | 4 ++--
 .test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy | 2 +-
 .test-infra/jenkins/job_LoadTests_GBK_Flink_Python.groovy     | 2 +-
 .test-infra/jenkins/job_LoadTests_ParDo_Flink_Python.groovy   | 2 +-
 .test-infra/jenkins/job_LoadTests_coGBK_Flink_Python.groovy   | 2 +-
 .../job_PostCommit_Python_Chicago_Taxi_Example_Flink.groovy   | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/.test-infra/jenkins/Flink.groovy b/.test-infra/jenkins/Flink.groovy
index 2aecf8ea8311..4aadf6943ed7 100644
--- a/.test-infra/jenkins/Flink.groovy
+++ b/.test-infra/jenkins/Flink.groovy
@@ -17,7 +17,7 @@
  */
 
 class Flink {
-  private static final String flinkDownloadUrl = 'https://archive.apache.org/dist/flink/flink-1.12.3/flink-1.12.3-bin-scala_2.11.tgz'
+  private static final String flinkDownloadUrl = 'https://archive.apache.org/dist/flink/flink-1.13.6/flink-1.13.6-bin-scala_2.12.tgz'
   private static final String hadoopDownloadUrl = 'https://repo.maven.apache.org/maven2/org/apache/flink/flink-shaded-hadoop-2-uber/2.8.3-10.0/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar'
   private static final String FLINK_DIR = '"$WORKSPACE/src/.test-infra/dataproc"'
   private static final String FLINK_SCRIPT = 'flink_cluster.sh'
@@ -75,7 +75,7 @@ class Flink {
   }
 
   /**
-   * Updates the number of worker nodes in a cluster. 
+   * Updates the number of worker nodes in a cluster.
    *
    * @param workerCount - the new number of worker nodes in the cluster
    */
diff --git a/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy b/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy
index 50863a0ddf1f..b88a3fafc2d4 100644
--- a/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy
@@ -132,7 +132,7 @@ def loadTestJob = { scope, triggeringContext, mode ->
         "${DOCKER_CONTAINER_REGISTRY}/${DOCKER_BEAM_SDK_IMAGE}"
       ],
       initialParallelism,
-      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.12_job_server:latest")
+      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.13_job_server:latest")
 
   // Execute all scenarios connected with initial parallelism.
   loadTestsBuilder.loadTests(scope, CommonTestProperties.SDK.PYTHON, initialScenarios, 'Combine', mode)
diff --git a/.test-infra/jenkins/job_LoadTests_GBK_Flink_Python.groovy b/.test-infra/jenkins/job_LoadTests_GBK_Flink_Python.groovy
index be395c829e49..ade6bc16a69b 100644
--- a/.test-infra/jenkins/job_LoadTests_GBK_Flink_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_GBK_Flink_Python.groovy
@@ -146,7 +146,7 @@ def loadTest = { scope, triggeringContext ->
         "${DOCKER_CONTAINER_REGISTRY}/${DOCKER_BEAM_SDK_IMAGE}"
       ],
      numberOfWorkers,
-      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.12_job_server:latest")
+      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.13_job_server:latest")
 
   def configurations = testScenarios.findAll { it.pipelineOptions?.parallelism?.value == numberOfWorkers }
   loadTestsBuilder.loadTests(scope, sdk, configurations, "GBK", "batch")
diff --git a/.test-infra/jenkins/job_LoadTests_ParDo_Flink_Python.groovy b/.test-infra/jenkins/job_LoadTests_ParDo_Flink_Python.groovy
index 793e06109d45..d07964d0d448 100644
--- a/.test-infra/jenkins/job_LoadTests_ParDo_Flink_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_ParDo_Flink_Python.groovy
@@ -320,7 +320,7 @@ def loadTestJob = { scope, triggeringContext, mode ->
         "${DOCKER_CONTAINER_REGISTRY}/${DOCKER_BEAM_SDK_IMAGE}"
       ],
       numberOfWorkers,
-      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.12_job_server:latest")
+      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.13_job_server:latest")
 
   loadTestsBuilder.loadTests(scope, CommonTestProperties.SDK.PYTHON, testScenarios, 'ParDo', mode)
 }
diff --git a/.test-infra/jenkins/job_LoadTests_coGBK_Flink_Python.groovy b/.test-infra/jenkins/job_LoadTests_coGBK_Flink_Python.groovy
index 3e7dbaa706aa..e1bb58cbdc85 100644
--- a/.test-infra/jenkins/job_LoadTests_coGBK_Flink_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_coGBK_Flink_Python.groovy
@@ -137,7 +137,7 @@ def loadTest = { scope, triggeringContext ->
         "${DOCKER_CONTAINER_REGISTRY}/${DOCKER_BEAM_SDK_IMAGE}"
       ],
       numberOfWorkers,
-      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.12_job_server:latest")
+      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.13_job_server:latest")
 
   loadTestsBuilder.loadTests(scope, CommonTestProperties.SDK.PYTHON, testScenarios, 'CoGBK', 'batch')
 }
diff --git a/.test-infra/jenkins/job_PostCommit_Python_Chicago_Taxi_Example_Flink.groovy b/.test-infra/jenkins/job_PostCommit_Python_Chicago_Taxi_Example_Flink.groovy
index 2874fc3bad3a..516bf028714c 100644
--- a/.test-infra/jenkins/job_PostCommit_Python_Chicago_Taxi_Example_Flink.groovy
+++ b/.test-infra/jenkins/job_PostCommit_Python_Chicago_Taxi_Example_Flink.groovy
@@ -38,7 +38,7 @@ def chicagoTaxiJob = { scope ->
         "${DOCKER_CONTAINER_REGISTRY}/${beamSdkDockerImage}"
       ],
       numberOfWorkers,
-      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.12_job_server:latest")
+      "${DOCKER_CONTAINER_REGISTRY}/beam_flink1.13_job_server:latest")
 
   def pipelineOptions = [
     parallelism      : numberOfWorkers,

From 36d9112f9a892afbf06b12ca8f32efaa4c25f94d Mon Sep 17 00:00:00 2001
From: Kenneth Knowles
Date: Thu, 20 Oct 2022 20:59:46 -0700
Subject: [PATCH 015/115] Re-enable PubsubTableProviderIT.testSQLSelectsArrayAttributes

---
 .../sql/meta/provider/pubsub/PubsubTableProviderIT.java | 2 --
 1 file changed, 2 deletions(-)

diff --git a/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/meta/provider/pubsub/PubsubTableProviderIT.java b/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/meta/provider/pubsub/PubsubTableProviderIT.java
index f8d8ff3098a7..7bd872e7c510 100644
--- a/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/meta/provider/pubsub/PubsubTableProviderIT.java
+++ b/sdks/java/extensions/sql/src/test/java/org/apache/beam/sdk/extensions/sql/meta/provider/pubsub/PubsubTableProviderIT.java
@@ -78,7 +78,6 @@
 import org.hamcrest.Matcher;
 import org.joda.time.Duration;
 import org.joda.time.Instant;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -196,7 +195,6 @@ public void testSQLSelectsPayloadContent() throws Exception {
     resultSignal.waitForSuccess(timeout);
   }
 
-  @Ignore("https://github.com/apache/beam/issues/20937")
   @Test
   public void testSQLSelectsArrayAttributes() throws Exception {

From 6cf92145aefdae6061f00977d7fc0662b1f998e0 Mon Sep 17 00:00:00 2001
From: Robert Bradshaw
Date: Fri, 21 Oct 2022 10:45:56 -0700
Subject: [PATCH 016/115] Remove obsolete native text io translation. (#23549)

---
 .../runners/dataflow/dataflow_runner.py | 26 +------------------
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py b/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
index e16c88ea9ee2..f3bcd3a44889 100644
--- a/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
+++ b/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
@@ -1207,8 +1207,6 @@ def run_Read(self, transform_node, options):
             traceback.format_exc())
       step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict)
-    elif transform.source.format == 'text':
-      step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
     elif transform.source.format == 'pubsub':
       if not standard_options.streaming:
         raise ValueError(
@@ -1274,29 +1272,7 @@ def run__NativeWrite(self, transform_node, options):
         TransformNames.WRITE, transform_node.full_label, transform_node)
     # TODO(mairbek): refactor if-else tree to use registerable functions.
     # Initialize the sink specific properties.
-    if transform.sink.format == 'text':
-      # Note that it is important to use typed properties (@type/value dicts)
-      # for non-string properties and also for empty strings. For example,
-      # in the code below the num_shards must have type and also
-      # file_name_suffix and shard_name_template (could be empty strings).
-      step.add_property(
-          PropertyNames.FILE_NAME_PREFIX,
-          transform.sink.file_name_prefix,
-          with_type=True)
-      step.add_property(
-          PropertyNames.FILE_NAME_SUFFIX,
-          transform.sink.file_name_suffix,
-          with_type=True)
-      step.add_property(
-          PropertyNames.SHARD_NAME_TEMPLATE,
-          transform.sink.shard_name_template,
-          with_type=True)
-      if transform.sink.num_shards > 0:
-        step.add_property(
-            PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
-      # TODO(silviuc): Implement sink validation.
-      step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
-    elif transform.sink.format == 'bigquery':
+    if transform.sink.format == 'bigquery':
       # TODO(silviuc): Add table validation if transform.sink.validate.
       step.add_property(
           PropertyNames.BIGQUERY_DATASET,

From 10aac42fc4cf709e6db4e7f3e091f11d7a10acac Mon Sep 17 00:00:00 2001
From: Robert Bradshaw
Date: Fri, 21 Oct 2022 10:47:06 -0700
Subject: [PATCH 017/115] More bigquery native sink cleanup.

This is follow-up from https://github.com/apache/beam/pull/23558.
---
 .../runners/dataflow/dataflow_runner.py | 27 +------------------
 1 file changed, 1 insertion(+), 26 deletions(-)

diff --git a/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py b/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
index f3bcd3a44889..d581c48cee13 100644
--- a/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
+++ b/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
@@ -1272,32 +1272,7 @@ def run__NativeWrite(self, transform_node, options):
         TransformNames.WRITE, transform_node.full_label, transform_node)
     # TODO(mairbek): refactor if-else tree to use registerable functions.
     # Initialize the sink specific properties.
-    if transform.sink.format == 'bigquery':
-      # TODO(silviuc): Add table validation if transform.sink.validate.
-      step.add_property(
-          PropertyNames.BIGQUERY_DATASET,
-          transform.sink.table_reference.datasetId)
-      step.add_property(
-          PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId)
-      # If project owning the table was not specified then the project owning
-      # the workflow (current project) will be used.
-      if transform.sink.table_reference.projectId is not None:
-        step.add_property(
-            PropertyNames.BIGQUERY_PROJECT,
-            transform.sink.table_reference.projectId)
-      step.add_property(
-          PropertyNames.BIGQUERY_CREATE_DISPOSITION,
-          transform.sink.create_disposition)
-      step.add_property(
-          PropertyNames.BIGQUERY_WRITE_DISPOSITION,
-          transform.sink.write_disposition)
-      if transform.sink.table_schema is not None:
-        step.add_property(
-            PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
-      if transform.sink.kms_key is not None:
-        step.add_property(
-            PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key)
-    elif transform.sink.format == 'pubsub':
+    if transform.sink.format == 'pubsub':
       standard_options = options.view_as(StandardOptions)
       if not standard_options.streaming:
        raise ValueError(
From 15ab5003e2115aac115f60db1e2cc8c7a4eddf8d Mon Sep 17 00:00:00 2001
From: Kenn Knowles
Date: Fri, 21 Oct 2022 11:34:40 -0700
Subject: [PATCH 018/115] Eliminate nullness errors from GenerateSequence (#23744)

---
 .../apache/beam/sdk/io/GenerateSequence.java | 30 +++++++++++++------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/GenerateSequence.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/GenerateSequence.java
index 78ac1777e95f..742a75960749 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/GenerateSequence.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/GenerateSequence.java
@@ -17,6 +17,7 @@
  */
 package org.apache.beam.sdk.io;
 
+import static org.apache.beam.sdk.util.Preconditions.checkArgumentNotNull;
 import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument;
 
 import com.google.auto.service.AutoService;
@@ -33,6 +34,7 @@
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions;
 import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap;
 import org.checkerframework.checker.nullness.qual.Nullable;
+import org.checkerframework.dataflow.qual.Pure;
 import org.joda.time.Duration;
 import org.joda.time.Instant;
 
@@ -70,22 +72,26 @@
  * will be present in the resulting {@link PCollection}.
 */
 @AutoValue
-@SuppressWarnings({
-  "nullness" // TODO(https://github.com/apache/beam/issues/20497)
-})
 public abstract class GenerateSequence extends PTransform<PBegin, PCollection<Long>> {
+  @Pure
   abstract long getFrom();
 
+  @Pure
   abstract long getTo();
 
+  @Pure
   abstract @Nullable SerializableFunction<Long, Instant> getTimestampFn();
 
+  @Pure
   abstract long getElementsPerPeriod();
 
+  @Pure
   abstract @Nullable Duration getPeriod();
 
+  @Pure
   abstract @Nullable Duration getMaxReadTime();
 
+  @Pure
  abstract Builder toBuilder();
 
   @AutoValue.Builder
@@ -97,14 +103,15 @@ abstract static class Builder
 
     abstract Builder setFrom(long from);
 
     abstract Builder setTo(long to);
 
-    abstract Builder setTimestampFn(SerializableFunction<Long, Instant> timestampFn);
+    abstract Builder setTimestampFn(@Nullable SerializableFunction<Long, Instant> timestampFn);
 
     abstract Builder setElementsPerPeriod(long elementsPerPeriod);
 
-    abstract Builder setPeriod(Duration period);
+    abstract Builder setPeriod(@Nullable Duration period);
 
-    abstract Builder setMaxReadTime(Duration maxReadTime);
+    abstract Builder setMaxReadTime(@Nullable Duration maxReadTime);
 
+    @Pure
     abstract GenerateSequence build();
 
     @Override
@@ -144,7 +151,7 @@ public static class External implements ExternalTransformRegistrar {
   /** Parameters class to expose the transform to an external SDK. */
   @Experimental
   public static class ExternalConfiguration {
-    private Long start;
+    private Long start = 0L;
 
     private @Nullable Long stop;
     private @Nullable Long period;
     private @Nullable Long maxReadTime;
@@ -223,8 +230,13 @@ public PCollection<Long> expand(PBegin input) {
     if (getTimestampFn() != null) {
       source = source.withTimestampFn(getTimestampFn());
     }
-    if (getElementsPerPeriod() > 0) {
-      source = source.withRate(getElementsPerPeriod(), getPeriod());
+    if (getPeriod() != null || getElementsPerPeriod() > 0) {
+      Duration period =
+          checkArgumentNotNull(
+              getPeriod(), "elements per period specified, but no period specified");
+      checkArgument(
+          getElementsPerPeriod() > 0, "elements per period not specified, but period specified");
+      source = source.withRate(getElementsPerPeriod(), period);
     }
 
     Read.Unbounded<Long> readUnbounded = Read.from(source);

From 195d727150aeff6643a71fc5dd2f6e2af9473d14 Mon Sep 17 00:00:00 2001
From: Ning Kang
Date: Fri, 21 Oct 2022 12:47:13 -0700
Subject: [PATCH 019/115] Updated ipywidgets

Updated ipywidgets from v7 to v8 to work with JupyterLab.
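To confirm the bump took effect in a running notebook environment, a quick sanity check (illustrative only, not part of this patch):

    # Verify that pip resolved an ipywidgets release inside the new >=8,<9 pin,
    # the series that current JupyterLab frontends expect.
    import ipywidgets
    assert ipywidgets.__version__.startswith('8.'), ipywidgets.__version__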
---
 sdks/python/setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdks/python/setup.py b/sdks/python/setup.py
index aaa761b14763..8451fc596466 100644
--- a/sdks/python/setup.py
+++ b/sdks/python/setup.py
@@ -324,7 +324,7 @@ def get_portability_package_data():
         'ipython>=7,<8;python_version<="3.7"',
         'ipython>=8,<9;python_version>"3.7"',
         'ipykernel>=6,<7',
-        'ipywidgets>=7.6.5,<8',
+        'ipywidgets>=8,<9',
         # Skip version 6.1.13 due to
         # https://github.com/jupyter/jupyter_client/issues/637
         'jupyter-client>=6.1.11,<6.1.13',

From 2d151c306ee0ccb2397c350f54d6333af88651d5 Mon Sep 17 00:00:00 2001
From: sysede <103770918+sysede@users.noreply.github.com>
Date: Fri, 21 Oct 2022 16:31:36 -0400
Subject: [PATCH 020/115] Add logos to case-studies "Also Used By" (#23781)

* Add logos to case-studies "Also Used By"
* Delete Ricardo.png
* Amazon
* Amazon.md
* Amazon
* ML6
* Add to ML6 "Also used by" logo
* Add to Amazon "Also used by" logo
* Add Strivr to "Also Used by" logo
* Add to TrustPilot "Also Used by" logo
* Add Twitter to "Also Powered by" logo
* Add Wayfair to "Also Used by" logo
* Add Wizeline to "Also Used by" logo
* Update Amazon.md to add / to images path
* Update ML6.md to add / to images path
* Update Strivr.md to add / to images path
* Update TrustPilot.md to add / to images path
* Update Twitter.md to add / to images path
* Update Wayfair.md to add / to images path
* Update Wizeline.md to add / to images path
---
 .../site/content/en/case-studies/Amazon.md    | 18 ++++++++++++++++++
 website/www/site/content/en/case-studies/ML6.md | 18 ++++++++++++++++++
 .../site/content/en/case-studies/Strivr.md    | 17 +++++++++++++++++
 .../content/en/case-studies/TrustPilot.md     | 17 +++++++++++++++++
 .../site/content/en/case-studies/Twitter.md   | 17 +++++++++++++++++
 .../site/content/en/case-studies/Wayfair.md   | 17 +++++++++++++++++
 .../site/content/en/case-studies/Wizeline.md  | 17 +++++++++++++++++
 .../static/images/logos/powered-by/Amazon.png | Bin 0 -> 38653 bytes
 .../static/images/logos/powered-by/ML6.jpg    | Bin 0 -> 6110 bytes
 .../static/images/logos/powered-by/Strivr.png | Bin 0 -> 5686 bytes
 .../images/logos/powered-by/Trustpilot.png    | Bin 0 -> 25458 bytes
 .../images/logos/powered-by/Twitter.png       | Bin 0 -> 9561 bytes
 .../images/logos/powered-by/Wayfair.png       | Bin 0 -> 82679 bytes
 .../images/logos/powered-by/Wizeline.png      | Bin 0 -> 6981 bytes
 14 files changed, 121 insertions(+)
 create mode 100644 website/www/site/content/en/case-studies/Amazon.md
 create mode 100644 website/www/site/content/en/case-studies/ML6.md
 create mode 100644 website/www/site/content/en/case-studies/Strivr.md
 create mode 100644 website/www/site/content/en/case-studies/TrustPilot.md
 create mode 100644 website/www/site/content/en/case-studies/Twitter.md
 create mode 100644 website/www/site/content/en/case-studies/Wayfair.md
 create mode 100644 website/www/site/content/en/case-studies/Wizeline.md
 create mode 100644 website/www/site/static/images/logos/powered-by/Amazon.png
 create mode 100644 website/www/site/static/images/logos/powered-by/ML6.jpg
 create mode 100644 website/www/site/static/images/logos/powered-by/Strivr.png
 create mode 100644 website/www/site/static/images/logos/powered-by/Trustpilot.png
 create mode 100644 website/www/site/static/images/logos/powered-by/Twitter.png
 create mode 100644 website/www/site/static/images/logos/powered-by/Wayfair.png
 create mode 100644 website/www/site/static/images/logos/powered-by/Wizeline.png

diff --git a/website/www/site/content/en/case-studies/Amazon.md b/website/www/site/content/en/case-studies/Amazon.md
new file mode 100644
index 000000000000..2fba8b727aa4
--- /dev/null
+++ b/website/www/site/content/en/case-studies/Amazon.md
@@ -0,0 +1,18 @@
+---
+title: "Amazon"
+icon: /images/logos/powered-by/Amazon.png
+---
+
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/content/en/case-studies/ML6.md b/website/www/site/content/en/case-studies/ML6.md
new file mode 100644
index 000000000000..6f41ce5405ce
--- /dev/null
+++ b/website/www/site/content/en/case-studies/ML6.md
@@ -0,0 +1,18 @@
+---
+title: "ML6"
+icon: /images/logos/powered-by/ML6.jpg
+---
+
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/content/en/case-studies/Strivr.md b/website/www/site/content/en/case-studies/Strivr.md
new file mode 100644
index 000000000000..4443d771f5b2
--- /dev/null
+++ b/website/www/site/content/en/case-studies/Strivr.md
@@ -0,0 +1,17 @@
+---
+title: "Strivr"
+icon: /images/logos/powered-by/Strivr.png
+---
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/content/en/case-studies/TrustPilot.md b/website/www/site/content/en/case-studies/TrustPilot.md
new file mode 100644
index 000000000000..09053e72f208
--- /dev/null
+++ b/website/www/site/content/en/case-studies/TrustPilot.md
@@ -0,0 +1,17 @@
+---
+title: "TrustPilot"
+icon: /images/logos/powered-by/Trustpilot.png
+---
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/content/en/case-studies/Twitter.md b/website/www/site/content/en/case-studies/Twitter.md
new file mode 100644
index 000000000000..16ce225c4e74
--- /dev/null
+++ b/website/www/site/content/en/case-studies/Twitter.md
@@ -0,0 +1,17 @@
+---
+title: "Twitter"
+icon: /images/logos/powered-by/Twitter.png
+---
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/content/en/case-studies/Wayfair.md b/website/www/site/content/en/case-studies/Wayfair.md
new file mode 100644
index 000000000000..88210a020508
--- /dev/null
+++ b/website/www/site/content/en/case-studies/Wayfair.md
@@ -0,0 +1,17 @@
+---
+title: "Wayfair"
+icon: /images/logos/powered-by/Wayfair.png
+---
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/content/en/case-studies/Wizeline.md b/website/www/site/content/en/case-studies/Wizeline.md
new file mode 100644
index 000000000000..e971353f667f
--- /dev/null
+++ b/website/www/site/content/en/case-studies/Wizeline.md
@@ -0,0 +1,17 @@
+---
+title: "Wizeline"
+icon: /images/logos/powered-by/Wizeline.png
+---
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
diff --git a/website/www/site/static/images/logos/powered-by/Amazon.png b/website/www/site/static/images/logos/powered-by/Amazon.png
new file mode 100644
index 0000000000000000000000000000000000000000..7ff122bf2f5e913bd3f7162b8e2bede302161f7d
GIT binary patch
literal 38653
[38653 bytes of base85-encoded PNG data omitted]

literal 0
HcmV?d00001

diff --git a/website/www/site/static/images/logos/powered-by/ML6.jpg b/website/www/site/static/images/logos/powered-by/ML6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..49062000537e51a64f2f9182bf6e56d77462b61f
GIT binary patch
literal 6110
[6110 bytes of base85-encoded JPEG data omitted]

literal 0
HcmV?d00001

diff --git a/website/www/site/static/images/logos/powered-by/Strivr.png b/website/www/site/static/images/logos/powered-by/Strivr.png
new file mode 100644
index 0000000000000000000000000000000000000000..cd9c993475648f83f63a088dcdf6701d7d3ff46f
GIT binary patch
literal 5686
[5686 bytes of base85-encoded PNG data omitted]
za-22r0$TMEgOjvGxqZu?mKDipIQ^~u5l|!H@=t0H2XsL2+lAQpX5r{E_Q}#NQc^T@ zH@zql39S|Wv(lWD_l6;g*bF=1;V~Outv^mRRJev5C>2wfQ~u}nhOZ|O8v|_P z;d0i?r)@1AQjfetMPlz4FvJkr%#hPsHmJu%xoaqPRuTyJ^rKuaDK&xM4I3RhI!4ImqZr}>dYs(Bs5F*Q zE1X=KyZpEUM(`nO@Lcpu+J<3G^{~mh9d;35d~uY9aAyA+ZKzX*ic*hw^ozL5-mc9h zPVHowBqFv?s9rcK?87`fWbAyKLb2$nj zNFyG9zn1M^mqoc}I&QzwCnUm2h;5?|_v66te%#n(>-CQe1!@VSRuKsA-Qzpm>MNgL zp@2qaXvYL)Xln`zgo7kIwcaMZ9=E(!fCtsXbwdDyWNR^`n(lx+xy~fPxs)H>@F)9yW0Ws?MTE;ssNkq)H zECmc(b7ta`5wT!xq>7PxXnHV`L#Q1Ao12p`vI}ACh$3ce)xDB9w)qSqdP&l0)~a30 z03VW|V5wai5I)9t$X!PTGa3a5l2kGTr0y_J+zoq#RPlbwzzlF1n0t|Sjj-i?j7CKe ze2Gt_#85WBBIX3**Fs$8Z$)zYHkS~@!k+&Z#pCNe?nQ{&!GTvIlfB?4d^iix$9Jy? z{HLaN(iE90jFxRABhV%t0Oc1H(tc5I@nm>QvuYQU6WJ8qhQWqYxP4-^=|vt&**1$C zglZ#pty9aET*WDY&?VhnH%QYI?e=pC{7edcBKl>&4->T)%Dv**{Z2^|aRCB!6%6|= zu|;fAq`2kjG8@4j1cx@3jFQfwth)%O{Q!%Cdu)%A#jaXtRv_io&hZKstEXII`!Qvd z(|&hdWb~*nSJsfc*yM61-nIH6(*3m9pz}tAMB~JI0%tc|Vxpkfa;KxYg=bH@goJil zfE?tVhLh^?wYAGFMdTKu^a+zD-FZDJU!P;0i?$HG65wyrtD2#KS`qEHAffIN!5 zGtyeXS3rib%q(GLbUs`}Jju?usu6tbxOl(~73^w#yY=7awBB9OkrP+WKA%DU+(k>t z$r0p!ekwUR?sWRd&ccRmVTL#~@W-=TOJD!&EsTtZ&f@f<4z@Pm-55+>y-wWq1dLZc zDG~RNBnFp?=y^4vrrk-rMz!A(px;zFc)-0B>8eYB!7#Dnu}I8j-zkrC+CgTD9{l1} zPk;1YA2qFfktNwXBw-x*ninB6U~yaT>(#&4ar%lypsNPR47dRk!ugY^1R>?wr|Dfg z?y#WiiuUCoJcrz{=mU1E%i@Fyn$bc|UKIHVZil!tjUISd&(2?l z=;97Hu$%O+j(T?M`kyqMh;okB*UFxh&}Dq>bk1p-=LNyOIDU2vR244x?BGY>_8;(oQ;Kg~xn{sRU@&l@d_1QUkXYl%-`VG(Z_gWY zm3Ri43#Z-GH#3S%4yA20h(|t+oeSR`z1%M$Z&mFnCr&VSlzL6aJJw~xaKbLSMtBmd zO}4y{A+oJ3e$*`2>!M3r_ONy5oaTNcF2)>%1cipd uzL~iW>T&LI?(4kH>-9RX*You}UTLZ;QJ%kg9s+?-!jvCqLm+3se~Bc? z&VqknDo29gA5vR+HF*f+W6XtptK(1Etd+IZAP_%J2qgG91hNN)g8x7uUP2Jaf+YkZ znGAu@yS}a0x(hxy`$$#k0p#fT@4LplcnIXtE7${hU7ykA3E#KOdMQV%D>^mk%VZ{l zO?ne@Uq2{QeYrspNcI#bE<})1H$m-^1dXHun#ev-rrfsJc=x(f<*dZZ&rbw|7A z)80u*GLex2(I61HE#iB@uRWyh^XjucU^^Ltlq(TaC3u<5>mx7f$aB$9oQwu9#X%m} zMB%0Mwd2Rtb#1wpDNn}Ptf7xJjhsW-j~enD_qI9ytub-Pbqtp13s2TpP9}9WF(HQu3Y7iChIsN&umVut7pb#xaaZhKS zzKyDV)?6Zzli|lNN!fU^ehdlNVVDllcgW>VMuRDUh%bigX#&O;a|IjXcTPqjk3&h> z!lsr)lwkH2+k$Tx&ybyr$5PV-GXz$Tc*0DI2}czd*-l2~^pzoxcIq1SLR{#(?znIf zpUR=>3xx@Vt#pSwx6kEtvD z;iva6`+CRDd8*$CKA8+jkSC^M>ajsO{fTZmlR!xRcSf4mp^%TObEbas#F;rn)cwE}p|^_WWOKPbFKDXQxsQ`+zSXD*47Meb1#~@9ag6 zA16NmVTu9fIG5W)H#=JMdp3LNpVEb{0mVEi#$`v2cCFU$%@7lvp6IXyIJyTZQ5c6o zI-M=9$$yK4ld=s?QKnsnN(oi04gXWFWfm}^J%>NC^4f{$^>psC8RwSM`McnZ7JCvl zUpDA>)%`RhWjPtPY&o7^SL(sreyOWrT=zfS43h!T0~O0;)U|cyX6rK^{yja@+t|9adRwhR@Tt`1sf0qJKyv4bo@*QhjrP%P*vt7>PF4?of1T*1S4CH@ z>S!hyZ~gD|t^*0Z)(cG#E-n*)cb)S8#iC81E{E+;j8Vww!t$(JXI@MfoH}0c$tOcn z_zC1UyGTFMuue&Lw_L9MlwFOy{8Xr?bfuuozaPW;PF0+-?7EhN-JbxNEeLR5H5CZW_&#&GY z&>|HLRJA(vCe$whJf*f%tr7=n-7C*7b(mVe{isxdu(5o<^;DL9X7W#dUVXL^?#rp@^+)2CC$ocx*oX|v zfUCb9JbJZuSIyyHzyO}UsE-3l=o7#PB@!LA$edg&V!Q6~R$r|MTkMRs!`Tur9MgD0| z#Bb9LiEIW+c#c?RyWMl-!{S-#na~ z?&+!L+k%}d+z$%R7WP6+3j90u;Ce;k59U6`Wv@OQY!vkTY2bLW=5vY?^Tglsh4)xAC4pJOrT)IMPwo>PpdxyW1()cN0+o&jI#o{~LH{?*v2@ZOX^*}ompgB?BD^7U684U;Pl6_KPl z5wibM-%>Y!_F-9PrGwd9HoRNNMV*|So=iZvCnSbBeN_1q#Qa*V`u@q|)Br9*SqRPM#`KrElXU#cxmMk~0k791M{Chfy+K7)ezXTV?$le2~I) zg420N)F`}*^Hg=kV-7g>5_J9Mx5bp1oXYgy&H{4%M4Lb!xqnrY%f|ogd8_ z-K*@jb~6%jyvVEMmxFnPEy}lxtmjt9PqV*8+hsH))s)#R>E;I#j)N2-A_h0Ofn9tq zqA+cS^+cKXsT*)dX~OF=1$LKUvYfgQ#qYOLWU{MM@^BxzXP=)s(R=dAHStj?3vK_Q zbI>$_S4l1qBR@I$`tZ^EKxOWeb%upL`yIjkhZ4#y1>0FT9U(lJ*}%F)aprf7Nf`Fi zOUSV{?D~QCSgVG^RtlkAoKTMGe)IS_)u+E}FOUI-%oWa@;BM$opPQS5!M&AR{VDK> z&8;l~=x<1{%InN0eWGeow|q*ahsVYUNh$$>EwM+L2TTFTZtRkW3-Y{dY=g1~MCc)+ zVNCZ!pJ*2Eb)e6FPLWi5xPk58%nfH)&1E-;+zYkuzw6-ej95>F92wc+pUwYM^$$9T zV|HMj0QMj&H-7(Q=axQVj{O#_V*|Vfsql;=yOgFI1#z*&44HYfe{{S4_`TFT*}Twx 
zNYo)|d{NfoEhE3+Y}w1&kj0>*4@H%*K6B{N$&~!Jmcudy%Sb{_t~B(B+Um?-I&f)lQy2O8z3}s6i*EyI-_g=&BU{{??ty=^mPlz2 zHnv)^l^$K}lG^(JN2n3KluogV6U^H|tTX0w{YAGN6%kTq{+X<+N>%k07= zo4(MWhy#g?4a2CQChIgyy}M+N`fddnXz>DYya-f>ix(U;20<#8S8h1UsSUO)V?X zv{d;!oiA?(8;I(*YE_!;SF%W0vu>)_%H;|^fhutl8(K8pN-Nzm@>mdq>2B}F>=ux=5(ftA? zTwzvP!BLWn3g-Fgr&1a)(Cd}*P+jh1rF_Z(^k91X5cDdRgrh5~FgUJ(Wtm5;cv6+w zEQac|ptV=sbgAz3Za-LH9ll@6s%<7WXO0{xI5x=KBf|NAI$th`n(&S}Si9xr8K8{C zArp0+)*D`$NBib*a>!i;;fX;;Lheyyba7p|vU8w){6 z{f;&Y!e+M;I=tet-|&!z>6N_!J1ed@K9+wrIZEF@qqX9t&@5RmNlV(t!J*48MDcK_ zs6DUn0BJ#eD)}0FJ}xW6ZPkfKR4v|9M33C@gPwa@38Put;!C5R%cl`?jU#_oBpKMH zlNqyn8o!;G-g$~=i#N~A{(p-4p20?MYvu0cX~tPY0`1B&ezsZXymX=|5rKd=mM6l; zTF94n=UUH^{L@s`C&@uik;d-qbB-reQ8`^?LPt>#jp^_&aYnP7uRCUNJ8+}zxWF2{2sW1lCuU{~4gIt>m+ zRpI*H+esM50yG!$@G9wNi*sksrsI~EZ5WCAvtjX^V&~*Ps8Nyf_Prh6oUQKihtbMsnuaN!}(4={lRi zYHjW{kqzIwikxf+gGshAmg1RR%mY0{!lpj3X6!%^l=1)bTs)Z|@FvSBCTqz@l zB22t?4^g>U7{@+?16XqkKVo|T-y}*Fl7l~N$i|DRp*v^SJR;}?)n7USH5@Rq4BlZY z(5W%?sljfjz^)HOORilI+imlDFtq-hINzU6q4y9O?Xi6yx_rGrDEW`!JZt0Hk@8oLiJZ4k&%t4l^WfkBO?Uo9TA%2I7FG)5s015Rx!PD=-P|-3lF~TWEG{a_ zP0=!ooJ$!I!5_ZNKG(JMSt6QVhbdnx%8CKcg;R!sB&2tu#%;UV3<9t2UW7aK#26fY z!(0jbwGQ9PN6R|Ho0B{d*V-Le)pC1n>NC$AUGZVq9K%03#%rkV#a zHys*NhRDfNalg)_=a5dw#hJn`rHrs5p}xA>vQ@FNlwKaWA_7fn9+G(l4SNLSXujXf zI8R--p8wG7tvkOU3kFvdOwa^bm|rv6MfE}KbW)Dntotq%>>O(3&<=%XB$c0DyzEU| znUZaXmr=vEmo@mju#%X<;N7=PfQFqe&V2qP#Rt}A zw@e)Lh0eKCI@O+y2Y1QZ_7x6KNkiMG5;Fof|eVHBq9OAMv*uHJr?1sLxZpZwac& zCb{Lmlg!M}wkj<)C@eKP5mRV+ZfQ`vV58#KUIR!0%tF|98d4gECYX3N!!u&0eL|HW zbP=sC$wmqpE%-o_0^NE}F2S>TDx>vZTKQg1@3d82NdiB&9Kt4{i2aDXsJj_dw~S;- z@T^KX_<&NBdSO+Ymn{;8JEYQE3*0SqclO+_rO8Ok#fkg*`?Io!Ic!_wn-FKLU3IRt zHIFCvBujeP_pSHH1{J3Sm}035({17tQ$~9FL>tR>c_uh+cBwWO8S=5%!&k~Y?*s${ zWSEsdZdYwNs55bC^b!>k`k}MRluO7{$-#ZzbF+zJ2?)4cn_lujqJ4YuTX&Bd;__yv~%GI3P@a ziz490>pTo%T$%jQDQUtFGZF@bS@k{>+BT6atYOx78@>oeH6m|w6ETGSs)ld%s3!8= z>B4a8{n{+ALkA6GWUht%${0Ubl&9Kfi=Mu_;gP3buCe<1-uu5UNOPLPyx&96@ffCu zwasG~d#j8PCyoG4wZ5L`Ry%LdK?c}_9Bn(T1lH~6e5Dmu-ame7a7>D^lufA#OTuN| z9X^w`@usc0KJzI839L#)gMIg0f8I@<+{o9Ne{R4uR3+6MpT2neD)UL65JmIEj~`TM z0>WNscRQ?U$I|^z2Zw4R=0eNgVqh=A{i3&O6RR(_{53%y&fw>jQMx4IjiD(4eG?gA z%0BsJPDgdM%x|S=wJh$v4J3yu2of4qaHlTgrK07>@FQn}`;nAu=XB4u$o zXw^OD&vKMEJH6r(Wj)^ITI?HqA?ly^xJ%J;Vf;WHCPjU5ru{kj4IlA763g_jGLUF912Gh~H5)kzJ>njEO=GPY2$UeWTR-JQ*&kdceSH5Fx zNmo}T_lz|+x>P;QT`k2GvT2*@RL#f~_RA4QugXJ3JyQ?_c7F7+pa2J>EVVHT7f!Z^ z*MwW`w6w=_xhTYc`o>Kc!Fs!b(B(MPkd}&Wo|q^DnnWGneL7?LXRR32EvoDXSi6=Y zLeWcup=S}YjUdXkKl-?%abrj~jhXM>U!HKUi}@f=lNU16z9*-dQ>ybrR80hakf;gS zEXc{R2|qkMoKi)pi^g(wb#;v?XnFShG`29Ip$>RgR<;ty{rV!?m0<{7*sq5o1PeW% zz8oG`?f05KHz-=B3%nyOkBY@8S{$GgkKj5Bq7)_|0exY$gXVtiou{WuPZR7QGW~l@ z_X&z~#4+R*1w~n2PrdWKLU`fDI{XZDi==KD3fHeSAPcXcJY-c;iXa8w*`t z#&6R{w5meIsG@m%g{VTjPBJEu804ZOtcaXUarpAB<*ew^PO>wa&6(ViLTY_?kl$!1 zXI|1qv>w^Q#eY-}706?gGP(Cy&9gtq4Cyk2P2H-9>7Q;#6Y}(n4c(-4LgHQ=7uHq` zhhH#FH|<1jtX(}LLU_s$)|@V6bx&DBk%jz|cz|ceasG<+41)DgQAIwKsw~oV_DdSA zXJKcMJFs_lo*bHR(U(EUDKKv)_>Nf`c6N*zxwtMQClye!g4Tw#2!Z?kFk5)3dE=hS-kML<@x3^H zfPU~>>|5$c*@*JFmYc6LgKk%3s9e1-8kYTeLXfiM@8To1F>3idtNc;8IlC%AHKjtH zoo9$$yuqJ=cs|-Zt|kFV?*l-GCf55o@g0BPy<&7wva+KDLiP?t%Mlw*Y<7W8-t9B; zt~t=$@aA4P()oOgtKhw7QF_4fx+a>hyWeD*E(Q)`20L>vZZ+b}^8d^lketzx@4eakf&ak`%nUMC8G5LM;9B-_jn)QKIXE!>#k)FbF4Breax8sup%=SD!q66T{5a;6RQVZ3l)W;R% zXK#N}sq5BP8hVB5t$XeYiOoK~XpkELk`KN!UI_W=kM!APF;7+5Tm2b8;+3z-y^n@2 z1RX4C3M2(}_zQ()8uC@EK5Bn)-gP?*80OH+2e)fiDZ{snBKmKybJukzR-%F}rCksn zvo8Yrd%YMxLjKzG>KE^D!E|1cx^EG0UPs8jRo4AlIop{O6erX_+qs^CjAo~~@BZM5 z!aUX4;`s^(TM@!5hB|X$a1C2Aq{~BjV&cxU4BF3X582eOj-zQwW#hi?(fCxL$Mvp0 
z*~^Z`KTWZO_*8wgZgakgBfjzMZptm2j(l2PeO>Di`;XZyB8%SJUnereE6P(EXYf(@ zCL=0R6Xnsb6Zc_K%A@*vI~?^XX%pk)`^%G)lg&SVFLdZLCRXWUMM7nHF-r=tB4~;0 zb_vEY;&as&ibG9@cEx-JZ*jim?-mrs@{Zif%-GoY8=4?E5fH&6gV}}O9-2r(jg*V3 zS)32CEI_+(+Fd|Qjbo}sE0#HyfdB)$evU^*d~_KC2`Y%*HJ)(NpRQ61$a7u?^IN*s zEzPPoBBH7qo(wC2s_g7NB1g8))4mmHe)!7LRxF>GmnmS&lSv`e$Z=8bv5geeb)UyO2uF09Yeb|@aacQEQ z3dQ`T1o)N?tQo9Z1E90g;o(bf#a+xZvg<-+kJ#Hm!ry|C`Hg2qiaugj7(bW+k-2Pp zV`JZ$(rwC3oftN*Vt?g4=Vk7F3)aHH_g245}L4mtj?_6_4(!x!mRy$eSQ1| zCRj?O46{OSApXz_#;WaAY*c&y{{5CAj)1pgqUM4WEj{%Fs{wa2)JRZ{-Cn15LFJSC zvXAlObwwP~k~{06;JS`6uXCTJmZy3vfFq6C|FcC7-y(s9!&Jn#T0+^$7y$BkE5nx`(&!NQWaKYBzWuN0xf9vB6q$O~qWr_8@sDR@Go3gXQ z_rgQ_O2cdx24>phm9{rGONuwzbM5wm8XFsHJQ64)Mv#XkAv%xxB%)X1JT)8~9Bk`M z4 z-3guxWEz)cK(4LP>b4&hb?^?9S+-E-JErGA-arg1p_Mth<95wLtKbqBk)hpjcx6rt z|1-?4G#!rDnq!0^&`9bvxQ6j20_b0utZb#-;1m!;*;RJb>WEWH_=tQU-+;*KJL(qfi^ zlxlnWoZuO+r+1Q!XhCVGgOnKuzA}rSO7#4roYsKu0atm`D>0{VR`O4L*G{T?@r)yr zy3#jSsNdB8^~k}={>fKOq&W{+>>as@8u91ZSS?7ISi#q*)O%r)g@YL0ev`N&&k$yE zPF{#Zt?#C>$N{w>Yo7?8Lv>2pTWGtoOZfn6+p0J!HFbpoT;De6Lo0b~8`KNC@t5=L z_(4RDc7dux)0K{K%Nu`(xnEB%;-_Lo-JU=Ch+qg~eTbalL7f4KPTRyb%lqFtB80(0 zy`pZDzO`CX*M>omQYZbIWxrzYTc(IQsxfc*Sl35t|$d1| zU7IpdJ4f5L>H+tTMITL(G2e?w2?(3hI_}9RAQfZedV6XKfqKY&LMjxNE>;1W6+RCp zizlQ^OiaYz+g!A6j--l !ikSx>I0UYpHz#51w}CqS}r(T$kc@;RT!c91&qaUMVq zNQC#!u^;$goHwRsf!kgv61(VjO_mS09~lvFiJz)$T^W^o&R}sb*6mueDNllW!bSmullv(F~3 zZ~s7@8PHnP--I>S8-QSEw|b$CiVDI$qmG!W(&afwDs;VaWg#Bj%%A067`R*tg&pcR z+@5Sd5BLJ3PZ2>HVq6^-b#LRLyIdtB54jvJ18l}lG-2-w>F1<30f95`!8OGW+%8Gi z|0&RH9egXHxKpg6CkB9j7mWW1Re9VKx~DA_$2KF^pjp2)g!RY~J8F7@^9jI7(4Zjz zioaD60Z=7Gfi6lzLQ@UHhk2KLMyy=NvN^@d|P{qN+)6DpKHL#WDVE+^?fE2l|uP$RtGs)_h@Y zgI~Tk52+$&EI1B$ru~PTAEw{+}K zCO@d(AQM|Clg9Q0@o&mBunCHf? zVW`}p$A;s&##Uy$wC7(`IP8+r37GEKB90z^Yh}5n&^suau_q)X^!HK+yk^h9RCn>9 zCOtiUo+Iss9b3!cSH{Z0C?#Uo2b1Gp_kyZ&gDkr^S!)jDjSuJ?7Lz))r!Hi0J%R8H zom-3f>h_f6y&R>_ozPqM3#v81Ro;~|^@65(6DwUtY3t1}hdOwv_cmXKuRJPuCF{)W zRDXd^DrSkD(L6qe1LkhW@+b+~PJ%xipokdfczq+WYIr!6>H7C!wfSYXS2%{M$Ez2x zRW+&_7p>l`tMVF7Q_JO;rbF9f$VlwJzNYAS;_U2Pn$x7WfcL26Bsc736>-RIwy41u z4HPsx=x|>-gke>U5^UisdE~QG07}q^6&23<-GP)5D3BFg1!?vTL`IUHFaD6+c=?+h z8!4mpp3AZ;%wrxFJB#bjTPC{8%8-`bd*aUHwLKiLdr(c@TSgf@ z81*+BJ|(0%x;$~FayezME>sS-7CPu?XlNj=D45{`VTuZP7H~U*c%k6XQN@>+5SRy$ zzKpz|;EHw!VOSNb#z^a{t*Y8N5~>sXk!VLzh|nh77v%7{P{yuX8oR>6ZRB%kBX}M*A(9l~sNvT9Eq$-{vWrW3(W_E1ynO5|+ zg-k{f-qI(f-4i0B$SOU~#;MNsCl`B@k9T02N*(Fl&lL^~t@~()qKSJjg?ACMuXEp6 zP*PGZcL=bqErYBh{aW~XNb&hwA~eBF&qys3$JH~fja#*oSp2k60WlkEg;7ThFH|`f z*Dg8ATM|)(%0@AY%<(=a68nRr=#;E7UbnUNMT^X8_GVsD5z1^K6k!Ur|>{QgGus{**s zTw;!$ImIkO{C+sckPDVs87%OOG?2rx#>ti3ymCIMW5;+Gpa)ZPvkn{-!E!hWEz1?rTjv8kNpnA*ol&j zveR(Q1GzzR_3O{XWDjT;?(_Kt`Uh;zsFB?cj{3_n`-DrWKS?6m!h73a>D`~kuBBhx zubU*?et+%CC+~kn@b~kpUk^lv&L_8v+9J4!k~NkY6V^Qe23}-7SvqkLRL=b!GivvR zDU8(8JNeK=jZQG3l(32F4N9oAud*|mlBITqF#Rcj!b;gG@@?SW>twVOGkA+9PoC_k zel*-Spm~;0(wv{=(a4$Q?u|MIX8JlNJnW_-J9Y~e{-}=Md++K6)HgIfe)m=4T~8>a z!u4Rc>(;UcTVb%O-_vx@12b#{Z~09R+}fqfJ`g6)PWPj9hJ*0Ypx%0$=a;N9Qx5#C zt*t?lWdYLUk!Qy8>KTkfL!EV{##IZ7u%Zg?qWABuE*TpHQj}7o9rROz{a!l(UWPR? 
zy(`~kRls=tM;rumFDrwsK#omuxI+)izpdQt|4^lGsl%-Dv26e$QjrJpiHh<~=PWDD z+5UPEB$YU%Z(-Yhfi}fn5`eq_!V79+bI>$;_UIMe%zc*vUB66ob#ELtM^L5X@sO6t zSi)QaYiE++l$y5ASp{kOw$+wRItvlBaD;mZ8yivWbvJg>6w|X0kC<0$VqP3ad9Q7_ z#a3q#@n)ioA_s&alZlSsSoa3ScM`;raRM{e8!5AlZw8P|nH57|BY**708p!He?e>^ zz23j&R;N9)98K_Uxq;#=MxVL+w{qXcTx6*#@WWtI%bW@CNgr4^)k|t^^&A3A5`2oi zWQnP6+Q+I@R+AADw)4n?QcKSKjY>|2a^YYXd3AzkV#{c%ohb$I3!kfX?bYi|^1WMwXR=bhiLKUpaC|L=!oi+gPsa{by zcMRa%^U4IB{XMjw)I2zZHqce1cs=UoIw3JLZ9F0!A8lHCpe5mc-%1a;Mjr0iRo@PVlwu+|;p(vJJ2rY?aBMUQrdvjyst_CRWK>pD= z!CLhB{**eFz!5Mou_BPvDF|RxO%NGC3m^+5!RVJ&>B3+1%ghfRGpBnm;J4wL>Mb1! zyok(%+0YcHBza-b2Q%H7myCAfY}Y^pG4cDo6c@RtDL>@p%uy03{_K1CcAwzH!OM;L zGXn@m)x@K$?4o=?`1%FE7kuriP=yG`M8W8!PnJ9ukMu@tYmmyn6QW2`_4|Rr1B|wb zjSL?#y>qAV8e^j)S7_>^{;YExReJk#VnQFPvX$(ds!9L^Wm;!C$(QAA)Wh;-;}=DT zRZ2AFpHJ_!6-#6kN0x+0VaBx&>ZgftIkN-4Pa`abbp+xErXGEOgn_zaiu%lq782S_ z4iG=&A%|^VbxQXA@dGN6cr@rseS>?4r%Y%GB(3}bMZ^Vu2<@6~V|s0h^e^+>QZLW} z7Ofaf%C*xc&*ECV(+%2(uh_mnF270s>HTCUnY@DVf%pXh+^R2@5~1e;?lg1L$?TN2 zG`SZWfh-dVWP|%VX=c?=CLv9cuj-07TgMfpLfT(4`N2f96w^+h%OdM9iyL6aW zACN*BQpafClOHe!?>A4+pJxLdM6Pw+jJkSTnvnB#Sso%VC7PG#{e5#p2{qsvjmPpZ z8;TdgP}g9D6;8F0IW`{3PbPn&r+tL24ZPMSs97C-E7|?A(*zRFJzx6_{yxYuD$W8)mEH3mI(1uv|a?9Ss`8 z541L%5zRW<9@6Fu$Vm>InLL##Xth{yn62QO!L8k_b(WlRW}&Xvq-eh8>i*RQ+qsH9TzLeP@ zb>Fhl4&JHt_Mh-M6gaMTcxXNuv$EpXUlK3KUu)A46BlQtCjGpOGcibzSl z&U}E4>6l$h00mG`c8;a=#^8E;s!?VK*B?OFYe_U}DDeidmBzjm>0RW?XhJ;)SjL9r z$FCJioY|>p|9(-$E^n*$#QapWg=2U4hoF0?HdslQw$3!wp*a`q1`(NM$#`Qp_dD~A ztt}aIKDzDO6^l?c_8UlaII^2O zp0baP=pzk=u=!eoEvTd%?}Zphi|Y^gl9`u!62TRTg;>j#8A{kNBc@5l@jdo$`Za2Y zWGxxgk2&8sZernwbOLesS_{8|FWbFpimJb!H^=-P%ltwjac;>6R*`QBarpR>>YaIl zN{)lEiOGsu--ZcZJ!m1y`*uYtK$SH@*>-HbDZ-%%owEa8EFlSuZ{)#u@|ed!XG9tW z#6ZJBczwPn=$UI7C|_HgBes>B&bST#k;>Z`7QvEu^C?uF~N@e0}3*5P03;$4Kmk^;albiSw4yujfWQXv^L> z@0WN^O}8ZNJ8&1nHkKPBJU2dL?UYoP o7+0=%4#%87ffhZkd*~|HkkTP-5URSyT zxfwk@1}q>Qz9kIPxEv>*48@It2y~-BPGZ5}y(ah2`tCXT*jA4|pfRPsQTtT1!M=G) zu3d_E{{JPiAk?PhWq@Z7>JL^r?u zDT3;nAWk;l6H;~?j!D*wpcB5pz#vN1wtA$z7swFS2HT6wCL!x|Fn`iR-r_p1SEQd- z?}@|Gzfxvy`XMGNI1=FUu{w0 zt)E*m@S|L7v)xD^k(`Evwt1&N%!*f=XHgF<)+Gw6jO^`;w<=_`0P(qjT1P3ks$rT7 z=3bKRU}9|S4GzZ>#)_;Idmjg_iUC)A^x)tiJE*IZsG|3>Fnzl6X6={@CaxEV34Nsh`s;pdEaaL%s&mkX zLB4O1n;+{f_Lf_)7WG31QmK)NU&w=4f7P@mUpsq4`dH_X;U~v&HEzQTW|09>ba+%$ z6d~8n?|BOI6SzOx-&*?XXUvw#$49NSNcB9u{-{;2z{ATTDx&Yc)%nwHGEm#KV=^fQ zA&>V~sDK;-XVR4MzS6A`*-|XA=7AFjH6P_sqWBdiMC4TA#k0DjpX+fI3SLih9<)Fp zmOgyOl~3xj$Ib?cg5s9{DihNsfh*_3l_kGVAd=q9M_pD2oCQ1-)^y+DyDUO*(3gz! 
zF_kqYhEt5ETZF@|u+jOW^H_w7X=%@%ZmyF4=Fb+8UVF3gLkEfO z*NjEAmBF^eV&XZYEGqn60?4n3GPC2(UnPS;5`=edcu)U~Icc7Zl-r;It1KqFvCEMa z5#-;kPH+IJC!q3(X3)jD!0Aqfn0`@Nd95Kj*Y0KnbCyyJ3$se$BB`Y!JB0Cp$O9-O zyhY}K(8;(!$ki`?gMVt-)jwrcV-df>s~&rv=%d?7Ac8!Gy9P8&T{=`oW^$uqb8#~A zR7=Zvg7Tx^JPfpvln+R^4t*-jVSZVOCF2@>+pJ|_#H&8m!591Z)22nU;+Y8$$l}sG zO-RS*>23KAtnmO;}k8QR`c@ax((bY0xfs}VHFUm>f% zvwZ^;_UrM_<5C*$hq8As_5wmtb7P84oP%F9r?eJb|c7?v4E!nt+zJTD(*jgK&s87I(cJWwkL`z=U>}E`8Av?+OJ}aU; zKBJ*ds%(>gCDMy@{g2tpFl|Dyg8ho5F`$-iQfCZ>cdA3lAi?6k?HojgXFzdNc^8D- z-x7F$lz(@d{^m4`4_DbkWjE_j3#X2hS}`<*k9KYz{BqK|;$$2%Z_a(?7UMy3?oEfN z*-vyPj=lUhyM8vIRi)eRvyZqiyZ8*9gF#cbXKrERRsY4b-5cu%{z`6y9s!Tkz-5XeejD+OhULL9bpq%Qp z1NC{l*PY+rCq!VCSI)mU12SE4kb}t}dTykkO^)dTUIh4JlZ&DPu>FOW5GqH{b@<-O z6hD$6ypZgbINJOXuN5d#&w^b`=o^HdU#+2+TC|OGhP=zD5O@mHCGqp~V`03;EWu9} zGQkm`F(9nA^Y@tJ&`%{e?rRN4!?n~cET%43mcR7`J%|E|*LFNfo&E!EQw|vv9XGeP zw`K1~lukvvZ8=K@p|Wq1-b3zti%~q(X)E3|S=xA0R-y}7u~1nf?1iryNU1Csk)@Bt z%k|2Ukq7pb)f=D$LPFcyx*VB~`!$8sci-Ma>3 zZ^3o&jITqu;$Ts@3FO97s-rxiJ2D@i^k&Xh{<$v0OZHh&Op4|}79hNA~n>-gvA>+AGjT8$8<@u;I39g`Ikv(gC5R5o*&oBQmp!6 zYHtD1>*J3JJl) zfuLuB&7^pJ6+DG8G?4nBq$Mgp5-_p>De(olxAWh4%Xj9N#X28vkyjzoGN+|17`8NA zns>b0^I+cAW{N{g+!MWbP!}lVnE!OXizKi6Kt^Li(&Nee+ z-Z?u>TPe>(sDfSOH7|SA8dni6t8LK8G$uj!`@6RTfHXCdB|3eq>|ZeUBSE@r+@tXC$Sv=)c;R*O}v`LKZK`P1Z?~f7)CwKJ!`E_~9X+Y3<)0_@Dr8 zmZA)epxQM}1-z@p8|{FK6^ASz9`Q3mLSueZu5B4RAZ&E|$*tR>C(5Juyji>Q^JOuq z;$<<34MYJd_dXCg*Sh+{(1X2MciAh0Jt<|6=&U%-sqV(eWaR!D5AKtZ3u)tjuqbSH%`W zH>vtSmjaD4kuwYu}hwYtwTjn%0=4*QRVfl7{cmqr&8=zJJ}M?T%L{l>WR5Vm~u8QkIzS%829eguzCH)DY76ZU03c?4f&!WB6iG_~7fdc%J%AR_lh1UKw zQqg9k7(5f3^7hP1NlTHxvqI70>W74StX=Koe$81+z%kNpCxM0mDZdX1LH)VqOJ^BG zdA77iVB&$hSvEmQjdgWqs>lnoUpMZZ=# z9bNeEaBn97B*x_tlmT6!@nJh-Va3{B`Z2KhZ9+C)%Z>N;gs0AJNc-ZLKGk@vm79fj zR6vUO*ZEajj$>@qm-WD)*ZUr^YyMpc;U4-j)1ZVUOW?VM6(S||(~HTw#kcJwbxR|l z2_EBE^&iY>kw4DFPEAG`aiTNo36o;iaslJ7ey*Md-($P6wpH3-dg-`_W6fgDr}#6c zA`Xo+Rs(FWyxEPB)fgnFjqZ4tqiC!Jujvc~!#lyo8HC-AQ7nFRFv< zXW61i_;qH!*~pUno$*_YfS>FCK&#S@r8Hb2f{s6_k~=iHq+=u?`xP1|Y3<==UK(Q% zc?=+m5FEpV_wVrEY@X{*LmOj{Z1mF62(>gxF~oN93BC3ku(=SxQuywH_S}A54Sr+5 zeYch~{K&2LUnz~yzHmT15@jB&Yk)`Z<(Eq7I-@vm=1mcU_s_xmDt)5y9Gn=(QIIIw zyN_m}g9_k!TC2&_TCa{0|WcD@MVQkYb)Qve(0f(H8RcdEgP5 zVZ2h1=lr88FJ|5f4enHCjtb8ZNvS$Tq8V+#(&#hdAOi%H33G>40Cq7Wql;VX%|2Ml z^Vz`^H~m%2OUXsXprMcGZYqrji2UmM?P1OkR%eI?bXMtec}H+B>B;)X?Z~7j3cGY| ztM`#C-#?h88Sv2|x?ww40iFBa$gI*?kV!Y2oZDW0Ei zZv-#brZyFD!erAW+#acMsc>ne-!;Y}IB3gxMAUS0j|N{wDiKBBs$d07Bpcn5Hhzj0 zL6uRCmK!b%`ys+B%O5jUn`0rS7DE|Re7H|<->Q|}bus`6f~U&-!gzmgq+r*;njYc% z!P@xSc<|c_I^>uZ=+9h8(DOy4~m<{@31bdb))O|$CphD>jm^t*G zl3D`bPCq)Ck^ECl^)Y`ZXkd+eV{`K$FWZi>x`-b1oqC=~ZoW4aIQoh)$9(wSc-#ib z5Ie+3nq%ih1h&S!uu$GTQ*h5Tq4p(gc1m>4u-Y{vw>5OyIBI6f)75uJRgPaf&T*+2 zqrekRt@LTLJfeOymk5t-WlBIf9WfTTKIf%0Q;|-`2TkZBhmx3Fp^AsM56_3N#XC5I z67v9gH#F0@E~+%C?~71zdsQ2tnUY1u!vg%ThoAC+@M(2 zo(4UT8|l{exeG&@6#d-bmGR+G>pMPMNGW2ad`{`n=Ko$+snnH~RT~Wznx=E&@KVDU{}vr+R9JRlk!zIR)P>Hf?&kTL<{)d^Fc0ZoQS! 
zORT&V>b~_`y?zo7$hJ+!l0Q$Vj)PhB(qthnSkr~>9Avrq`f4h8cB-Q)4_-5Z>`ehX z$GA=3%yzRRBybV5NRv4W9x*)wXU$rZCT@3Z|&Z$mb?~#^BB#> z3DpT`^RLk9l7U}*!B!R+Z)J4Zpu~7HhiAnw$Q0YQuK5v$ylV$fQ4<%dSO?GtqNLRj z)FUn90aFyO>&2&aIv=(DVs^Ox(Sidr(JU+BRYq4DaUW{Ndvme3u{nBr=O#)u;1#HK z&_QB-2AFPLqQ3FpEL_|964O-B*0_{2@gCqEcGuh3}@P-Itl|6g%u8V+S2_Wwyl z`(Co83`K<+TZ9(dWnWWdyQ^VrBVjBJr8^a6xvVonG4?$qgiuLHmLbcaBpD0|Q^|Nv z?)yIe$N$Ci`thcY88g>4bNznjbuORp7qezVzjTcm*`P|NGGkpt;A$WC(p}7~MEFR& zI3vX&iU(H(+siFiatmBrH@s!;`C8XY<-Pmj*$Si-Mn}hqnmxg|6 z+)CB1S}cQ7Q^sPbZyM71yS!&SQfR_YrTZB{4A7EHNNvz2YK`7VEh{TKD0n*xbK^*P zV4z;pkd0a6WTg?IvZ@cy`Fq2SSV%mL19~JuMLFkbARMrwtdF!L;V%tpAmv)Xe#O3+ zOdedmh`EsjJi3(WkRD5BH6&ExwumUZw zU;lCMVL>v?*{}15^i(d6q8t43`u%=SO3y^bC^&yt<^m1&l#!Bj)b(>@(N7U%Wh(2ym) zvpa4Q_x+;o^P?a8^=YLXU8qb=2@TjdRg(NcB5k3Yy0=ike!A7U0h<}4J zhQ(r2dSf<)*$#Gf%{An;De?t7o;d0bS!=B%Btbs{aaS~Piv<1<(v>8QB#4CP9r%5a z-_?|dPFy`L?=vM&qo0*(EIWF%XSP`lEd4$lTJYXqZgHi@1TqXe54JW|Sk82f2fOi2 zxUfn|U!T^SCAtZV19_#IBCH7q!QHKEXIlr_D=4&ai=amt6Z9{gpzoEIa^nM8bUG-O zn`$+VzmyCP%hBH}1H(!LA7FtpMcci^sH z)_CJxolYUEZ)#|)m;Ix^OH+mLt%>a7=EkUjH@K89xX$9k6Bw2~0c8vUc$RMkUALQN zD4_mo(^YO`2x|WvD2y2S>7M-NXaCwNVmh6k@Lq6`9uHy?TXyW(xEdU@tR-Sz)}aOlunreb3bHn=lzy^?{Mg|`>Edm zS`2p|o(z`Dh4`=NRXr`T9Ra8WjEQyl@7-`QfcWf?pGId-HB$T{ps?^fR5=G zRw33a=q%Ta!A@!A;?PcKBdNo+b}LHZP=R%wj?y3KhxTV2?+-eYvC-n;#PN#hY{520 z_`L9%F9s;q2vlO3v~$|Cs>U5&xI~eVPo{_>QmF!yTtx*1HPs-_9}D9Anw=8*e4v{h z{K_&b!!uhBuxeU|_3(^tA`JULt5Znn9m^f|kVxpI0Hk&@L8!8a5wxGX?%bJer|1mE zk|i<`TLvIwGL`@p>oFF`2T`H)il7UTIU^gd;HrE-)!(TWGg%1c*$8aAU1TYpFT{Q$ zqEa}nRbji-*7N$l;CDe`(QypP5_XhsC8H%da!$j{taB$(AxADW)!?>hf~j=;&^W7# z+AD|fx?IuTou3vk1q#jQiOe@v$u=Z7%O5S1%@!U?JjfJl5F|?Fu$ii9AjxtEUivb` zqiE0y`i&bTTV4hLbPIu6gX0T^3ci7{m0y3CMRY%kt;h;ZyrbuD+F>i^yD3q(@B4h) z)A^;;DDL6Pf%cPDM=Z-NMvEyn>9^h(+BD}k|d%cDWfYfsH!PE45X(rg;eSl29`FQPBts-WcUme+lwS=pak+ux$h zv*N}zNQHT^W#`)++zt>zD6Jl(*ygKu94^F_zHQl4u^5J)-`wq8$0+BY^d&mrqit2) ziUoG+us9zf^wj`Cs3Y=bC%Q1UA6?mOw6?QLsh1UsEYNT+gk1|I02C`k_2)4f@*AD!sd6QI8A~65I1`um$h+?HtJzjz%|%Be_55dyprFFD zMOA$W=j!LTdUocEd2B@4#&rHcsn=8{44e#j*1R2w?qOUU^P0N#E0`i)7AOLdO8^;4 z)53jkQVLHg2;Q6CpB<_{ch5WZuD{(lM6CDG-D^c7 z*6q}&J+Py&ay)UV#$0L?A8Pf)SjlY1#9qs85{mt0A}<6zjRql$Wv_x$-vClQOCQP* z7i5O6V9E72=gXk$idMCGuoIj%9f_Ya0)eZ~=6Q?#G3Gh+n{eF{kDcG2&E!{Je8i=$ z54rN~iB1QskoWg!z-jhZ*x3s6ntP|2t+D^89L(Yv;k%6vV-cCe-f`5kllL2oWqa}P zF2fTgd$R{a6Oyptm@sOe9KfnbM_Dwhr{kj3-n-ai*j+?s0rc&Y5Pn&6_y+F-FBkXf z@goQ34`^n%cnH5Y@0c$W@B$TL(2ejL!LPmZgEJWNm%EACky0KP#-vtCx!Do%hckWL ztB!oBrkF3Kw^bDlIt#b2Z)$lzIX?c&Z16p(1p)?#y>eo^W{ZC3Q<9Em_wD$$Y(T#v z7Cyp*_erIxr52wzl2=d=vEy;MoT_zfrP+99o6ym5$<$(#cPQt0_{ehJ>)Ts{ALCP8 zuo%1P@b~MQB1G!ME=czDGQTOC;FRNy4_IW7yJgIa2m-y5&OGT$jVAjrvr>G)VOUku zw&SuT%TUS$&H4wzxDFEj`vLj8d$)+h>82-& zR~zMRFE4ht0X?b%DB<$E3G}t7G>v+q`dNV&+vq-a_{dj1vztH zf=iFUL|Y3g%C`m9*w{&>_#e*SMfA949vTWg+2a-aE^Xm_V?0M^ch^mj#N<{_m^N*R zi+H8HEil1-Tu56J^Y+@m+!u?$k!mK?1tS945N!t*e2d}(Np?WK^yT4cSZRI z=_{i4Q;&><8rjs|*fO61xP18~xX6p$DtXdzll*0^rwp`7HTvRR2@8~3OEW(bvK)V{ z>EUcmS~dpb7l&;U!l|T%+uBcCLHhjwh}PZn=i68{DU>D3`PxAl(c^bSs#?pA9Ocb*ZZMJMmAO!KGs)WIIIQwSR$vir?A{xzbXFiisFBwj z;FmAsufSm7K~-CB_T>!Kns;hzdG%~t3fLJc=FbSuDSm6~3LUgG7&Q z_vJ|b^^TMQltLRUtO3S^<5`~YL(8;t2P*3XOr~&E8jPKe@fff_+Vd& zOSbvDc`^Xfst$NJ)HdtRi1?bHY*sl!(O)K?pFKx!r)QfLe0mb2sBQCyrrjKf_cfDcEYY2MdOkU@Fxd@CEEV<9f(ixNk-$^}Hi|+7@ z1IWhYtFL>aSarpcG>da$60`OzdCWDT{o|~$O*N0k!YK^MGeoWlEr$v;@-0}h_@Qcb zXK|Lz?{-czmO|tVNCkGdh%+Jlq&>7tvJp93mpJ2c>vs=-oD20}P+A*;dlJ3| zYi30a0L;lUvAWZ^k`ZxL+lP%hTr<-)`6EZ>nzlBPsSrEG-g;*(H8Uy;$2~x6dv%W* z!<;bU#kKkI;3kQX1rPRCJuBm>Mi=~j8ss7>a8Mq3B`hRb^Vfu?@pzEq<7@@4e>VzEsSfin^1GaXB1UPh5~VP~HdIigs{ 
zo{6tWR<_GjuTr==eQ2mQiL!Lp`AYS3MU%KWkfI(!P%)solDcMHerMI{pjXQ4Y(C~P z^}t0J_3GpaXqtQG5F}e+Cw;8nPPl}l5jk<^IrlZX|3i0NgEp!a{A#QtLtiiU)``1#}k&kxWAC3BIz@bu%g0q@fQVilA0$jMq(H z%%Uq!rP)F%u?EDjytu_w+FiN&#GU)*4~}PS#5esJaQO2|bHm*B)^C;fhd;~XN$v-x z7vow6)gPo(>LbT}mC^2sWgLls@Qh`$&M(ALb{N!)c&gApe3%IvOAZJhb?A2Jpdqc0R_x%0OwK2+G`h+t&YV9( zVQ88qo=>3;jj&YF2uRZ}W+5F=-&H$61yVa9T7N5cr9B~0vRf=28wr5wx zg9KLgr#|SgdCqKmMWCwbav7uGK^H?4zB}F-m>+Vhb1qq?k$VrZ}0zi^8AraEQDzqj)KmQmw`5YuH*X_-=k<)KV zeJE?!agG&8gaSVwqTKPQFGKcV7_qO+wfAXUNJt2tlp;#_JRAWkebUwz*BIJMhUJ!; zm(tOtrX2^7uwc={JZFkL>VkR1Eqvz;jzO_SGI%J+&+p3~o?ASJbxS!TUl=(jh?Ij4 zF>F;aK5McSda(d(9l-niO(TPY6TvIwZltdNyH$ER9He&Edu41N8RK^M@N1eRWDvev zw3V(uvu)P^`8tWn7yS5uL8r-WcB3ULIw+uk(pxXerwjKp@#>}%l2{D!*B^J?jS-s~ z4sCubZ^l+6nD`Zo@%lZ5gXKTYz+ zm*J1S^Fuk4tTup7w4ts&_h2TMSeA^rQN03(z$AG;q5_~)(uyZZIoQqhZ|1N2j8>N6 z??1{cl`!GV{QN$$AEYcSXRED6cv^auk$efssr6oQ6Z$MpE>KL3eHGNAR>Q0ybnb6j zCd{5itX4+Vf2-E&VBV`OhoLn^Q7I?vzC!c5d)mpAA5~G-o*1psyXx~*%QiyB$h65D=4y{-5*448EC{2{l7P8+vuo%!eT>r9ntc^~U*Hq|| zXSyh#Yb4d2zFxns>eq|QUl!E+vWT7?TZ)t#LXhe0&rf@R}31e0ZSuwf%p{4s!e_zOXl6CvHF4;=lMj;qwAmguhjJaUcMz0)Aa;ep2h84QtoeB7HMD=x#a5< z6z6xe|dy? z(xgaDEd4iF>#*fpKRhV-`)ly_b};JOP!oS;Yhfd?+$p7o%qOt=>%N*~3C!4~qB2_b z;OFw>xDGI*pl82-ACnQhU7p-+)}go9wB(6!GHNIOJD(uSl=UyWjL<6+f0kE$MXS34 zn;PV{n?GFfmAu@2(Ny`|(~iVHU+>Q<5{6cf^0=VOk?TXx$tAXUlUmllHluP|&C?h3 z0@DLi!1u8GyWpyi@cMroup^nr*Z6`HF}e`Y5JX}e;t9wie~ob3ljYy&Gw=MS#Rm*D z=HA$3oBij$E6jrB_I?zeFn`JbQ_%sJ;OIxa zr*}sUVMVU|bY0~7Kz6KHur6_@K4NN1mx1M^#IZhY_a0`(4#yL ey}*rlg@E__-(N6Vc=*=J2SzjvH44t!2mcSk^>If4 literal 0 HcmV?d00001 diff --git a/website/www/site/static/images/logos/powered-by/Twitter.png b/website/www/site/static/images/logos/powered-by/Twitter.png new file mode 100644 index 0000000000000000000000000000000000000000..4ca58962cfebf770533a74fd9782aa09926cadb0 GIT binary patch literal 9561 zcmb7q_ct6))IL#`C`%-QutM|}y{_IxCq#)5y?3!zCsyyhtX`vAg6N|6-g{@s>Mh~* z`F_v)2fTCU+~>|S&z*bE{4nSKFk$Md@&wN*o};0m5hyCiXriHELeS7Ke&Ap{F&A59 z5>LRRuBtMr$|9xY)$+hKiMvO?)ASB{M8bLSuSdpXo-JCkZ`01ekKTx&IaJvno zXvS5v4(N?*1@q{Li@&=Afk{{by1(vk`j-Mj5#b5xc z^&)GA{ORVbb?9#CEMF|#MciZ=X*1Ay!wI7G=Ik_tV(}GK$r^GrMV88oPaAC9I?xo(hH3jMblJdDm;l__n-++KY;#~PM;Ld09bY7(G$ts=CVlMP*jL63=&XvzB2Znx*n8| zV()zilv|%bW3j97760%r{mA5k6hB8|`VE@^i0{iBRWu}LK>c`Yu)UjqiVImmp^L#= zwE*KZ65!`@InEYDeRyMKq#LC3;my^bX6ZJ3_q%y3(XU6VEBlDh+~v?*edV+(NwIo~R?|4o+H~7T{o7RDd{Rd9^d(&By*$g%PopQ2u@mHPF z9$eH!S|3Kz!aka*JkI}my)9?*HazbcR2G<3q8W5yYi3LVjorK z=imvkiS=hwLH7y-LvGYDB88%7n4?bXjR7ylK&{Yvld!AOY8JKa-(aRwux{;KD9T8a z;ICBmhp#Yu8fG$&@O+VNPmQK4cmPYAm8$*rD@*k9z7CUx;P0tf(K<$dp6rrDs@2*9EGwJLa$PtnQ^Ts`TGS!<`TE>A9O)gbQVBW93jS++NK; z8)`7M9a0YpJw8d3b(+2phjW3svkW(9zwLR`Sn9`7b)I{75}X^@lFR%#)5m;I5J(%i z_liQKJX`~`m$&I-KHg0;QC7}Tr+Qu{Q)0p{_p!F!wyCXpEW!r|q|k1AD|gt;9ZEsK zH}Fuo6F9FD7W;d2ry0jPV(EoA?~8Lxwa;gBwyN;BQM1-PDz}B6*5cHwEM%;cXtQsVp#R0ULUb15AZ(@tf`BK6|PPOX=5+yyT@4( zMeP_7g&0`bN;dXyE$gFJf*S)4xq~CXqbh|!to3lD*6VS4jqL@{J2OR^5{@|qllVO| zt~>x``i-czj*7C}#X5G`kh_Ln)y_yL$j*S^qsDDRNcsaAc*U+HT+aQrpo&HQ^!2;o z*!J9((j#KRu^9@RygFUma;pVEo6{6#uw_csUt6vbdvWnN&8Z2PBaRpXQ6xdO9Bb%taKIdAm(qbcj z_6^w<8>7s*easDK!OouR2VPJu#+`!ngqIu|Muc1I9SO;H?e2>_9=M_)4WsD1$UdDU z+k4ZGemxdfFPppR4>c0q%$l{3RhhpD%j&Wg#68~2YfMheWC0U`F1;tnJ%E)jeP~>T ztJ5yqXJz$A;8MDB(l|cup}fOadd_EmbiTb>Ubko`TMW|CyKEBqwcKK6hjb2#_QIF0 zUqkGkw^z7Sab3q?OAV5Aq7OwnGkdJE2?nJ_iLMf0Kb|3eHEJQ_Grj3&30B;J^D<-- zioa|Jb(L2i*7hq!BnC+&+69n?Gk0&kOMLrG%^c%51YaDr6W|4@(ec#CQ!&Rt-G3Gr zcxz>OH*h6CA5?&}ux`Pz@9eWstt5!XW!jz7O0^k}gXuYsY@OTQH~NMSac6=*!L_|M zNQbDsm9p=`bu5~owKIg!f?l1yI^$Rzj5LK!6Ew#UQpry4The;nsUs{_^FK`qS6~~wI3~j_V8(bb# 
zHmhda!i%1tS?SLlYuF9>Qg5d2t~|jAY#0YUc*?r(?hv`Y^Qjd-T$H=fbSR$(SM4ylz9z{1V|BlD zjXc&b9ICral$-RD*}d#NYf>oqC6squ^V(ANaa-t%Y4nLdZroBG^A%1`;fdx!;ZUPl*`^#QgZau%)`whI0hmk_ z4o(*8y+l(j-J;w%tThzLx3Cyvm`>2Kc0HII7Kt`~ zVn1^tUQfuxZ=9VC2SHNfy~O#?*8$LPjhWi`GU#MB#e;t+73d%)KSsL0VduM%jkz*o7Z zNBf^7JaqdM?F{hJV3-d7%#1E(0xr};YCs+YrWxSr1rEX4dr1HM#!{0wo40~HSwuud z;&$b1*Gz!70`XVz(%wcY!B36zZil@bD>Zt-p85rJphgF!t}Hv%IyPsYu9mw^Ixf5Q zTNC<33aVOAeG$v+xZcQZ=f3s}pN@*oDX;Kh>+qr#l~Fo?+SjxBW5~un=0qon{*J8+ z-y;U6oCgZ&@h-tU#u^kK#K;F>gDRiLQi40p+kP@DpA)?S3(3n-8T)O~YHtQH3PE~3 zJsj13K{$Fp(Rg;fw_NyDSv`j@(q2vkKJ^oI{qB>}f(aOV_IXBLWQ6Jsz8OGmm#7Z= zmqBMgA)L?R(=qRoF$1foi#+a=xARWWQ-_>hgr&)jQ|sbzs{Ya2B7Mo%0|y`eo$!RI zNtu&`1pFcCPN~u(O6BW&QllTOx!W!zEaq^|lZzECmwF*#X*U$xq>ipj{Rv0x&8Hi$ ze*{6_?ftwQ&JaJ0JArGfdX>pTpW_29OLu28y633aRY!^V8e3+qFG~SSq9f^eW>{m* zU)mU#LkLGh-kxAac8lWn`bT}s4yrz}EN zbzIxIGX_^K#MlW9M&e;oDiv~%9ofjN71b_dBAIqlso&3$_u>`lsbn&=(?-bQ%=725 zQzuBZTYTsy382}Jj3;N+vW)!PY^tpE6*1S;?(>oB97nAo@9?y!=h(Qpmn$qw%oh4c z{1;sS`xu=aZ|itck)U8_x8dY=!H0CBIk$CirEKKL$ct*h>ZGIH&Aie|UGOq~+m#Ld`m+KtWREB+3JAYo`i zB%&8hoNAa+#h#jsUQWQ>Q%mB37x6y<(knrb%mWY1$i}$OnjroMgk#Zp?vnAf%oPvM zr$vD&J;R6HR1XpS^Tx>wx?ZYP7P$5M7lS4sR0$&Q{;;O|durc{QYF?sI+iGo;VqSO ztcmr662H2V){3v7@pMp&g)Y%6ep8V6%bo;qulLYPq8nq7IB)*5!HV(@!1+z?ZFb>u za~cPNqnR^?I**usXI8B!!g=C59-@XDM<$!?DK+L8<5A(cq8wpV*?EgDElL!r=4Km zVd*5)P_RbDQ?0N{$c=zlSLfGWn1Pe;emdhl(cdjff{+a6ihtvLYMX#rtu2cuSv7?0 zAguJi^1606D9^Q&U#}SbSHuF?Y*_8OS>_$}?8RSoejvH4^0wA`NG;^ko6kLpPi>Kd zLI&*2kDjrasEmuKq z4V+l;T;%>guPw^EdF=M-x*P@LOE*5J9t;42Dq^ z>Fl{@0wIE60s#xLZ_8{xmVb<#vO)rw{F@!5GJNYEXQYU6h}2kc7NZ@<7NqiQEy3{e z0_*&#!_zhPIhQOkR_n!mJFm8|)w@q$HW3>TW@ikZ|^!gaIrk!`jmy*UU;K=)7QKFH&Z7sm@bC? zG>PBm&Z(?y>F_sWOxSQ$LuN5#&3D5o;%1wxzA|{}WKaID;4-Z^q$SP$;^=y^P8xGF zL0-g!{X@0~APKyg(3kgV;uL&r!HPF9uw~X-UZ|Gh$1d)u8?Vd``{(KWuK|7WE=o=7vv7qUt(}o}fYynWW3v{$W5_UV`P(dqH)r^^cd? 
zl!ohp@wEwkSR5tHV8~w-nUWIaSpFdXOubv#u)2DLr7~G!9m1QiF(-Z5*5C;$e=5Km z3ELbsXtLB;kwK)-}u2B;2GRiW|nXF+h8 zFnIYlBa*>S97ge%rA6)ujLe{^V4)sfBGC?V!<~1szY#A){l$xEOu_|9iA4QH+PA|L zzVD;A3@C)JY0`M1S#rm3#_;abzuU|27}GPO>sc!b7;^7OoQdV^X)jtP5%qufLpN1x z`S2w|HyzItukc_iCKnVov22Rtfm^oJsZ_cWr%*yl82nz8-=AO;k}-oQ4x zzggs1=(vKC8t39ID;xc7^hO}so9I(*XElJ5gqcivS8&l%NL-AlgpZtFSf9%M;t|LB zOSI^E;k~#`p&_4xDM$2mg&$St3WuB4Lt>GC`>?b-ZAae~dMc*499+hwMdJyM+m&(@ zdafj%2_Gqpb(41l@cY>v*=C>VH?)xIeD7FUWS=Rd{PN>N3OF$hIMPu}!7-DxC9Y^a zbgQp)Kz}fpE!q*4{J}lxhBV9^=#;;s`I#PV-j#Kx5Y2e3LYood&x2~#_Uj%8qV{yk zJ}{#-9_=sy&|Hni+Fd7cGlRa=%T^HHESX#4qwOxA$d=9o@SuJ)F#SaHbGXiPYLi`cNwnqmp{?*%7w!0qw)vJIA=lf8=ev=lK9X;QuQ8`t3^JJFpfcuU80enLW6z(2Ibw` zZ4iOo*h(Vw4+1jzxMGq(I1B2plenaZ!<+Zpe`3&Ww`aw2NspWJ*Yx2NWPF9EU1MOD zheOPU#gQwDs!|$6SmRZ{Wz_rE9&L_kO>l6746d{4!VmF6N?6VXRdA%;k|4{2{^ZSr zV2UG7DTA~=QH#`^isAekLXwy4_Gt{kTs98pI9|rl@Fj2&u1R#>5?H|K+gm-8H2HCi z(+1T*kizVl^@nkc1cdkEi;E7r;)=u^fJ>`gJ*Ftp4<~o2C)FW`8JSrK`V-8r26`v` zAPEHF%ryz0{#kLB3cYCf=$c9N;??!EAJtRHeoNAM$g^jB*VB*QNQv}ac(bz<&diF| zoE}VhMbtgZy2Xrf*0it&e(ApexucS!|L)fgs&NGl^G@wa>r0!cHvai>tBZaSU>|>#^ijVY;^qFT8S^uLIYZavJK5 z%KFuVC|DDU#KjV#)i*&XPW?6Vu>*XUS(L?=x6} z$*xmK(q5%+APU!w$bxUy-lz2J;G^2t@bJq-a|%JZix|b97L=?nk?5=jTo%yvgjVQ~ zJtby0+94N_LA2>pn-jfT@vc5JNq)0TZE)Cb*=8>Bp|!DK8ovl9+x4m=1^o9xHJ-`?g}TRL zVE5pZ^$Znx{der;hKS3RKXVr=ou~&9goAT!m@kOA>r6`9t_VuXSjowJGcFLf$CScR zEjZ!NnZ{9I@WdjJfxyo%Ae9YXY;eLb@SN*Wo(L)9A*UlDwbxf&1O*}W9nG1xIeglC zKd@gbqk45%U`jZySZT2N?LaJ=t2c4-?ASV?i0oRUKbMDg zjJ)CRw9nsu4e31@IL|OS5hR>v7eK29F~F9APf9(RO*Yr3`d8D@Pody#6Q^~=Z9d9uxTF}J2g8juJk#y;X4DBZuo zpOqAriv?TE_tuG#;$(puEy6g-x7ax~+SMj7j+%KN^7jMUm8VHGFZ{#2A`{#{ z4al1YW`b_a^Wc+(7;CCgiZ9YkAR;7}1X&{Ma-zRCnAna-Wd4aIxZluTidJa>DDCWz zKPHe<+F{)uks(rCr_73kk9XdxSd1HuswSi@+P&{vFvDIN8dxwZ;y&KdGYbxIid<&9 zR8Mh=6l(ctOgt8)*YZ>1%Ee>VVESS*wGWDC7{0sLDl4f20gXZbEEOm@H=n{GtY5( ziJDnJgo{x|>OfGR8|3IRS4F9E8gtQH`iJB+Bktg!+fG?l@~(-$>v;+I8%|YN z-!Js1d`PD!Y)AIigx!~El-E_s?4RK6tTeg68V{4GcGIOk4c&8V7pmY`rUKZ{$RH+B zL^TtI>_Kjj>;$J0@D-(mO8$kqgr`u+46}&Uk~_7MgXP98Huw&2p7j=Af{Ok zVl9oNm1>Oo5@gYfDqoZHzT)iZc7w2rK~p>3AUt2c|E>G>UXb9K#!HEsZXBAI^N%)s z6=5@0;u9AHkUh4|kn1=4IlnSvXkKr)rSP!$hIendrQFs{ycq%apKx=BKu-xodxd5ruT%3Knyg&9z)IDIWm%i^BhoMM7Dc72Kf*1~+a zXQi&3fTrwi@uJ8mTXx?8kSj+Um)f4C1rixlL#mtDX~dHPmg-L!K4}uOXO1}OHaoqQT zVt$P%ZVwct*6~~r@NGET*4oj4bzjR_=3H7sL%=V|3n54Xbvo+6T`h{3-N5Xp7KU`OPNiE1||KK6KW z+hCa-9Y-@I~J`3k&$s+qxSgvlzWOm0f@I14><5J_08A z9CRnxXhpvp#pJ^#2jDrEOB`{VS;ZRA?oHwA{My` z67!ckxk0mf_dUS22AXI^2w*FSSEsU4kLk2p>2=nhM90NS;iE=ZTaQZ!q&3j!+vw0;?jNhzfm)?mWf)duri`}# zp2a7%#`xS2J2yHQzXsB)D4F9=F(1*Io6uulrLR6I%}M~wbsz4j2w?c+=QFL zohM6x0b0*!WqIplaS&bJTr2#Tad z@Pg%C)9w-5lxpJk!&*w6Vcp`qU>I*m%o=MjUt#9CtwAQZa2ik={&?Q$s;>3Z$A!Q| zm6`q4!kb%Zr67aoq8Ho&7z^mRd;n7T9!+I;0l2~`=%~T=101Lcu_^>bqIcamKh$%P z*CxPPuNqeTF+nL+$NhwmpRMu@qMxbN`E^{xm0{H6l7LmZb%X_kT81N?+cx=HYCDho zgze9u@t`c8@FQCd50|~Vh@>s77ZQ|?t1pry%W+6=bv*yIOSRBT(soRFzBK+v(62oz z=9z;Kl6y;cpnJ`~BPMW34v66Z@S=hspHQ;XmCdxh^H@6l4bz{D+M-ca;YO*XD|?;`Wh%ST7FLA--h51r0p zLC~Or2{TKKV`~7#=N7FxinWc1uRM;tY>Rq^Hj+aKB9RM$DTfw^`aDn#1~~Biti4OIiTVJX?LUOr!hnaONYOthtK-#Ql{e zKeqxJfVGgGeC3`t(pTe*Ik1Nan=`q_UC$*8( zji!xk0fW!IvYL5Seik=0Pfq!ugjvg{Lzny2DpsC*YdhERI)e{^jyh{y@$uWK0&*qhTe=QwR-VWQ>UWAUr?pPSZo-+QTlOXnvH zQ}HP5M6aa$Q8px?)$*&U1~Rvs(KN__wVW#d`ZlAPbL4;jBRTCbvP8&`ti&>A{zoaw Ls>)PK83+CkkFJJE literal 0 HcmV?d00001 diff --git a/website/www/site/static/images/logos/powered-by/Wayfair.png b/website/www/site/static/images/logos/powered-by/Wayfair.png new file mode 100644 index 0000000000000000000000000000000000000000..62745a08b75cbfadf2c59bd0b5965ff1af774972 GIT binary patch 
literal 82679 zcmZ5|1z1#D_xAzhq9_KSC?MqkLrW`2h}6(2HH4(JbTbGd0#ZW^FcQK50s_(vN_R?^ zbc3|yd}qAxz1RP@c^=Nh*>Toh@mp)JHNh%MG9<*5!~g)0z+@%W008$ApX>5p;HBcJ zKODSVvUsZa6aWe%uAaRh1mDw{$f_vaWL8VunW5kqFgAB7K;0^mXwP1|^M2Kk{#ZbEa+1bcCI(vagZ-aa#!szGC%a zx~VbY@6ik4BYQhPb{>{0ZXNmb9C`Ir9#1+cfQ$IM`C)b_4WIMoWXjo&bHEsfY2@eO{CBeY5m6u9)y zSWvZjK2H8Js}0{^#vsDLM#%lG-Evu0uCz{@xK%MGbb%zcJ+lZa$Dc_P7P7hjyIXTO zzW6`={TR5F=G9DiO>V&0A9K6AXLa5f%uM%`4{0B+KCV|hjDWysYGxfP7aaU`Kv1iV#VE?O=N!gN}WnK9=$MZe8_8w7b=h`STYM-Jfs-3I{3{p5oT zg+3L1-c_jVk&mIa9-8tSnKlkgh!|yKS?AQJ!@~*b6%O*YjmHF-s1=J0O zD4L^qv?1+y!;QItQCVu-?Ys8~{czXc&BOddOU7aD__i}|wdBA@$N=*=<9W|yop8O` z#rCq5U7RfLwgoC;;J+Cpsma)oVeOUQ8nKmUiV!5N!XezaeTZsYzT`3kt}KoX#Aou9f8UAw*SBuPTiTitbAs%=@gS_h zv}lMkrMS-LT$7sQ8u)dp{V?Od_pb1hxuGAG%(VETs*kf2TDGl1{Mr*uL#zgl(u^e- zaQAuIi~PF>eg?OAcc@Vaoq<;aP>Iabt_}auyR;0rVS(D>{|GN05yI~v=!UI>Z!?6Y zjc^WXcK*d4I`YJ)TY3)g8*qUQF%tY`^#gzrOi1Vn=*8(DbnW$iPRy;;8NdS;i2v9V zkx%(mTLepoBCtl;bj%a;qCFF*GhtOg+<`L7A=}6LP~yBUHBy78r>iGwf7sxxpGx5n-4vbjChq6yr%@$q z04S5jqr(vp*TkX_Gg2{tCi?aXWFHH0)J5Sfx@ESrOL zLyu{|f|lpC=eESGluLW##~$qw>RtoH@#w%M{BGic&4q<|IME8Tb~@(6Hc8 zFLD8b4;-i4nN9`Y*~mp6a`~YqSVsf}OYGihvDX_j<`DrIq4?r|j{)+nT?J+hiATWq z<4%GS1Br7?rH>5930y-#mH;?;PEBJSGrUNs+cslU)M@N=77S%4@*9Bb-Te1i#77h+ zZRh%YZO?t#M*16idF?me#;R@E^O$dLyAhbQ3rwu`7K?ikts<$KqNgs zXNv(g6Bonzy7RuR-L{DD&z<^8r-LLmOCJDgK#liXHgS~b!*kH4-dMb#fCVA{^7QW% zfqFv(TG1dG96OAeeA^cd*}qvE#y}rue5`ySIt2hBWHJE3KU=xc*;%42Df2Lp;Mu{NHP=)v2>Qp7S0@2mwf<% z0GLn_E;r&azOBaBwiL?%QvL@Tr7%|1x_J3d{K9SRCkrt-)~x;MT=SxXxn#nYH%F##)Wck}NK5DeD8+6#c02rDpC3;w9^ z+Sc_NDSm?o1V-Hd&%~77yd5xpppM#pO4&vb6rM+U>R}A{UC$ZF;1o-sbA`r_Ma-`} z!ja`N&6?!D%l^q)aG1a;77K|KRN$Ta^eB-IFlx=zP#&A=wuYrWv-+N?(*%Gs;fw#% z#jo4gqMG4v!053af@*t|cAyad!3=(|h>b#0m>R~AMYyfnH!tu<_z!3|Q_0B``}ccV*C z7zv&eTPT*)3MLT+ez>$)dnZ)tOOYlm@0Nd(TaLN}kaPZLY{lDiJHPvUTij;4vcQX? zRn^iz@5p!(-0tt;B*eTh{6ISQmp7{c{I6^+ zmhg^lt_Q((9wur@A7Ix2m3ZynfB?5JKO{ehhfXaH>y-y)G5x#>aW2RBFy#cB42-5- z-N2Mq!smg>zhIQmK~Bz@A=A^!n#6^&-l-X!>R>|x0L$~{;>j}$NWZPJc5zr($Q96B zy?Aj|>+h!q!CO&_d5drVA~|+3vi*7W+0oJ=GoeTgPpRX@2Pvmvw;MZ=3AO(eg^Kn3VeSgI32_@d0AlvFMOn8t zbr491<5J6N_uymVy8!Vu;eRohpPFg%FE_~@!=sAl&=s`pSXn{0?vvb%EMEoCD9Tpp z40LmowZ?Cd!f4$*u*k6Ka|YaUp|-(258iz6L{O8t1z(*f2E`9wcarp9^$O)LWKCXN z5_=HLX&t-XaWHnEWl*`_G32v@&lK?iu1V8Tzpxt-YfKRTSy5;64ywt#>VY4Uy7P7b zkM7E@V5Ic*R*G`v=D1%XAUibN`j0nbkT}RFI}i)$=U;NQPg}?v)XN+>)Z9sd;MUD3 z(CP&DO~goL3}5HuQa4&=0~l>E;h0|qGA$?v3@9Ce_~QR=ni}h1Y^j}S2t+7vYKkkI zV<-OY(%a+X-Ly6II-7tr81CP|5;tC|hAICuwZ~7LJ4|$XX$)rI2pQ4(wnt)Ry2U=< zO=qAPEijLebV|IK5099hSDZnSEA;tpH}L`T>wgN#r@VX-h^nIy%(+9|cGGq{v7cvt zOv1fTm0Pu#ldoJ)KI1i9l#_K=SPaN8OTUiNczRPrpp06E7*Khn{jZVnLq@7um9@Wz z2yIwemy?-uBn@fEatX0Av2{N8{cd_APFC2gXlSgwUmq_9*%V!f=}SLk#s{F({{el! zG>Y6g;mLDJ{?edBx2BB3!kdOzoNSZ_mN_7w$4Yo*Aioo>lmc?FFJEr3?0<^F z!Ppp31BYL>3dQBz5KhOzU0U}kN(RdoBpG{Lvm0D zK^gBctmp(IS^xieb=7mTwwAZH8H1B}#~eGymqRzJj7_{H29!f<&;0|*R+|C2Z;!&{ zD{I7H`#&qv9=;<7QdN@2{+Sn$VqMIZZ`yt)1ld`@&LKzPF%-)=f;xJtf~o5-J-PkV zezttHr+&%vAehFSQsFY7a##E3KUl>tjnDEsMxQZBid5L5sCrFrt$=NKJ(Odoo@WZ) zWxKLF_1j3>|E02VqIGCtQ8j&6YqI?wKzxbN@1F&vLJ#CmlGw8SAwpmIRgn~XjOHSz zNyhdq&N?TuLcD&)Bh%wOJN32c$jd>n$4bjMvFRP4!+Mj=Cr4`gsuCYKhcfQoTO;$9|7cy zZ(2j%(9kNkfx7oks(z|C4T61q@N{*&G7-&D+F91L1zlQ~e=>X52*`LiECwSMVWTQC zz4m{ZkdJ9gz~K`nTGn`Ze8~41%)njl2xi4G+{rsewK@faB(;OwyE^M=Vui=@{}CrF zB^Tm=`S{(HbmmvI0MyFC*Cy8a25djR9<)0DNADFr3cD<{UIJ(+8GovpY? 
ze{~fIG3ZOV@=<2eBQM|f0Zv!b|EH_JHYZjTfMBK2iT?hDG%>RM4*AX^Nmd{p`T6L~ zU0R;8Xh@V^FryeVAprftPxEg_KmTQ+xSOs$rncya`oMkXLIZ82;wautahPn(*?V$F zE5(;L>KVYv*#FR^{>#C%&xr5u?k~;T%NxwKhcSYepI#F;cG|-)Er=z?p&JsVyxAz$47A@Ix77t4A&p)J> zcsyo}rCNSG3?lSq9$~0iLg@ER0+C1|p8c@=F(#5AKgh(XCX^0H1(var|7d-loat`l^(frSfS0;4g(R+jXyL@D&Y;8NQqWR9A8ue z)7Y?0{~aLlJ-#@Q(WLMDnA^G2{C2MPITEZ{L5yI7pO7qaoYg@a~n#P4@AAHy#bhWSBEc~7P281exVR$Fyg2S zQpfV8T?LzVMHECRV~X^&213k?m+GhbpH?J(uJYs8%yq}gUL8IQ0$5FpVQ5j_6c#p! z4~1=>_`}&Qr#04*Qnbpv!vlzY^#3)*DyjZK0kDq&IxHn2_k9f-iw?=6OK%VHw-1+P zOkBvW<3pvu*yQ04GSC}HHdwb_55@yRUVt9_|C&lFTr`ec==~J7j1<+BHFZuU<)R#K zD0137#?@a1$Z7hhpSyK!T0jbYm?mC+6ZO{Y)U<{X0K~L^Y?M)g4XxVBT~v8g!#W35 ziA@^DD`dfu=F1C#r00YxyQDtS17w#9lib-Wp#m*IviLv?p@{w*YO}~3 zC8D3qaK%{*yuB&&mv3|!?^r$YYAM>LuDwSqS_ubDb;$q$u?fTXH<**Xwbg_zYk15j zxdxd*{tMYW{@1lr@mtGT7d5ggkCY7JNSc1)bBTKc-qi%+1p6!W8q4d}3Nh?g=({4g zTC!ep1Eb$~!O%^T`>siJtfx;CLLkom$>HuYm!VPt=xQRF{;|D)$~y_tLkA~+@t)?T z3Y7r~5X~~UX9_l_44bVVdDcpr`x=WXUO>rX4}&;sO(w$)Y9M=bK>+~HhCWW`lcja8%F+7t*|mKJ^$8rW_S7>4 zmBn}kS~5zYK+N3k-G7Lc+E>Bz27Kc{IId z;yfhnQO#YT@6WaRld*5CZC%0xC(i#^IBNjg&dbXd87(>_K`)mBYrHO)Ae_M`@L{4C&unDO$XDog*U0M{Lv|NmfIh??Mijf)7MwBz8QB``{U2LMgSpTksmYk{XnKrKgEQ!~=H?pRFmo`{(SE84S|bd!pG zJ&a<7tBTOxRcm0HHs@!mGan2`AHC zQoM#S`)L?4o9*r2(ZUdwLQ&55RIb0e6WhpuK94^(-+z9W9s{m~_}gNyVciFEkT8t@ zMH``fOjP2;xIOn%lfk3Iy}&i-BhIY9X{xgOC-a$shXoK#&~kz;_4}U+yMw0j=o18# zMX>$ZBXfZDF^>7b_UG*i)urlxt9!pc?`E=Hy}&INY=02*tEu`J500APD)Gg^|G%4x zky^4`)ZdEakLQ($hARH8A^x<}3IefaMB=~uN#fD{zJmXteE*Bg_(A>vtFJ-V@qe+X zn}4nXlKj6B|L>A9IB3`k-{*;{Vzj^-t1S)RH0JS-)ef9mJ-qayX zYfYPXrg8DMTtldNyFS;3VCL*K^n{#Tvt>&?*9lx;CBS@^Bn}eSc1QQ;DNzQIw^zeO zaiZ+MDiJp*)K2_fuYWqf@U2K79bmUPh=4oiPEOCXqbH|)vM0BcB@USf zuU9PHIr*RJ_mhyuGeZ53C#2rue|@t5^;zpiN{sy(CPB%;l1t|(!b)J!L}u&ss9fcj zX2lxTXQzOCzm9tHAefY&bu{T|-b|9`SHUeN{l7^G1ir!l%K1NC5TcNH`DD7rgXle6 z>$z`o%!yw$ZRBW+9Bh~R;+oqg_Nz${w)K45KJ)UIpWl1YKdswm(CD62y1|!~DM_C; zHr{$rlvnPTpl4_XsOmJUv2J%FAN_Bd-mISyzLI%P%i4*0~BD#mAOx_-A z{Z0m$+kX!4clHd=h^x~6+ly8Y0q=py2vh-8tZZ5Kc(2!iOT}^Bk;p}dk{>zVQss#Z z=SAM+%v7$AX{i5KQ$OWTh+lIqc89VZXT!Jh*6S`-pB3oKbpAOw;1dA`032J-8n3S{ zyM}|*9-OvP@9?Y~wf9mO4`TRwriE>LGqK>SOXD0b7fqW`5e|nx_c-47px{zYpi$^5 zDDc@E8gat#{W)r%;QPyt^RpJ$XS~j(vGL^7i(5o(8VTD4V;NcD)7#r7Ecjhwyfl@+ z+=AD?cl?hOgdg;?0N&&(xB?x9y3S;^u2ZMBJUfbrKM1 zJ(!c(9kptVT&b}Us6Kkn$b09vB2!7^yzjf3S`f}v8|5?On$y)XJl9FI00^}bb3Jl%1mWz^wbKA229cbz%K z>7mY}u+=@zxiC_piu6eJStF*OpGmJECOn+0B?v~a(zvXn_<8ar)#;J-wjd;T1AxV{Wc@-HB@;{C|4qMXDW zLA~K=N_J`nO9X2B0ZTOVdYAwV>#&FLn7i>t z!sg=cz<6HA`Egh%(S}pQ6}O^O87}L1w;g@X`+CO8DU!jIjHf|r?Hn%ll_y#IZyJ81 zmS^KZ3enDmou^T~QdqwZV+Qumuw9ZHH$GCODFDW{U1_{~I4U%!oeO9Ufm z>*uwS?1JX?!;tTZ=`|STq_vG3yKP1|hIJn(3SiK}+s@Jq*<+?$RW`I6g_&NY|-;?1KSCN$UJ z(RXDrjI9^zJgI>FG~R=e*w_^24ACDr<)X7I;?MBaK+tEp*$wTi(C#LpZX8T`d!vUZ zus})ZM<+JTjl2)@fULD`U*1hDl21<8xM=&6CfR&ZMaUz+26ZjXqhO<1V%JkP4{P|v z&dJhArKIW)rxL~L`HeV8y8-dIu)BM-LONsEbd9=`v>LSOaS&#C(X+!}gma{E405_1 z*US;&P;v>_zl^S!XwrAxr(hjt`->%a`uJgKHiPxpMeS}eoPIHzzECX=0|#lCfVJ*IJXl>9b50WYVFfjUhFp^i!ra>EHtj*7c39#<#EK4;~o0H4dS{$ zqoOO>(4IdMYm%)_uo!(h}3J zE}b#1mKDFzZ7#st%S@AJcaet1`pmx#Lo8_{>l|>t(LUEUmOu~-SQ-^KO^V*un{$zj z!loJPYL06?C4!9`;_@FhcWX6t>*?(I$I=}|VzeFY+unN3BY*LUV3beIRiv|Qe2X0s zD6J@B$1l^`T&o@l7oTE$sVSPxFi5Z8Ld33jSpReu0ryQME;gbDh^4CAFX%_YMMLo^4%Qy`G2D8cu67s)b23{u0mi zG*9DhHs~z5zQIs}CrzWCR_UGS$+LoQ4a$#@ykd%Z302BMu$dpb0kHYlx@v(PQ#Bv? 
zbN*)UBnJ)Zq!MP@t9L{fcAtgR=+J&~Jyn5IQmxKbRw;)}`dqy(+*3Roz2sX1Pj4!0 zz5ii!J#aX#qm~7KTz>F|sE9&9%^g+DY1& z5-!UhE@Tdj^SIL+Wy;ztPIb4Oc20-mD@<%nk9+@0@U;r=1`m1aOI~irKk;^S(W%;S zg%L&mRW@K}z8$8xwQc?#cHHm7o| zN;GFl%zZM$_n|9OmD=*;30hY#km-yQ`uXJDll&k@e*ihUYNR`yDVXy}5vCi*BN88P zXn4AmTOg<|!Q=CgK09Q_bx%3^QdznCe$$M4eZ_^cWPfqy`(z&0spN|?%n^AzW1f?T zKIm*~HQjn7a&F%6O8Lw_Es|=YeAGWIEX;vLbw8~A0kc+y`f7Uj<|Z;+V2nQFaCINf zGv?$tmHcMm#tV~Q^6)!aQF2>DW@}0$#7_F6etdmwM`dG2ChBM8POa3F^_nge31eDd zl;*^_CsFaqu`|d=N3aLwoY_^T;u)GWH0(8s`RR=JH+ShFgNt=lY6B@`@)wb%8=JgT zkK3;tTgr9E2*xm)xmBDgVhX#_$Gu>_l2`9<)&KCs_XFkKzoyr>h>$IZm8Z<1FOtby zTO4aK%5sU|@nei#z}ky-dzt3Is49$|SkKi-pI(}xg5k^-^lFUcc&CD8B7c^MX@AR7 zM=c18l5n7*7n3f_kVRUF)6bEm_KG6E*p6<&deOQfF7+i@v>b)X^r?Hp5V>%hmv-^W z4quHXryV$ogTJ7P^&{0#Fp#U z?u_h=h$>;u-q)V7b2>JhJYOnvp5U^me{{;I);=aQ`_%RNGkd4wCQ#S2$)-`6s4&rN z@7BMY_r;^5UCJ4=X`>nG8-2xP>1<*{MaEWQVDGjUt>`+rbpPU)h;grC8_nSl!JwN< zEfn+Wm;AjSI+O=LJ1sr*5=V#cwj8!3DbnVgKw$f}yiE#cm7T4(nw6GeT-g#{ErAYF z^mP|t%4J-3&j$-Tb;eEUP&vo$_-UTnIJzn?d-8b1#a`sBZS0`~%K@I7n0TgKVqq4M zHwUX8dTP1v8MZW>uPSJ+-oWe2-)eT+yj{?|W--x1mhlKoWf@#dVZ7q})g*Ty_j(d8 zA1AFXZ(L#X#nahdvLnT!_V#ZL?VMstb4Txd7H*+tsEcOb zRzO4*saLMz@;3!|4+T&u3Y5Z2jz>*1!(S?9eFs+3cXC;kT=x+cAzjJktAa_b}1@K!&ed ziiz-#FQ(rhI?jv+R-$#*g#^7IX1>;^srvb0UOzi);7{%tGnG|%Us6qdcE82e+$LBZ z#*h{IEs95EV|R_zeQy@Aj$W^Sn!DVyG}lig8FfqOf*LDn6Fd?nX{T^k}*6xOOD(BkggYNZ)(KYpJV^L!dBrem&>_3IsLt<;z8tsZeg-Wzuw7&E^Hvr>5 z%>!;8F${&2Y>giyDP&2OBCRdpR6nwdKG&@=oY}cxt>zGDJtFV&&S0%;?XG8ceK2+L z;hVQNE<6lNTiRf1#35Vs4m0YUmS>P?G1rOP?#43w5}~ky5Ns-+;cd@|{=4*B^|# zRL>5~qORx2YSy%NiN4%)&FLhvp?MI6V1@WJO^NY-WwzDthN1=}C|p}$ge`CTGP zG74Oey)288y6#Pe$4TND2`)(C{UC^-aS%85O-@jP`5!(|(4CcyW2oh|^kGJW0K>)gIR>%UQWLm9ga)IA1JKM!kGWl-{a}<}0kdqTe znMT)kY4d&LHF=aI@htHyM_GaD-1m zu=%(!);UtXl)scn&v=H6wt8bD(Fg_Re9d3(nf@Kl26Ng9;G%E%ghVp0bsvprg9%G4 zPNyNAL<7e6Juc~J_IK;w9<&{@P4{p38KXu}G-iXwxMV|0A)H}hl@}g9YQV3rh#8$l zs^`gUzfnG9DHd&~*6yCpGNZXUAY}7x`_4usg_NK0J!kmG+5S*k(q_c5XiPk;k#r>6 zQMlB1x>*dFtNLxrrjyCg`q3||@PhHTHyduc(TuL_o1RUA$D?ooi~bJ>nEp+jpB{{l zcCbI)!>|pdRb^q<`?@VE8Z2b-^dEk>EM&L#F54?ZVvRbX$K{wx76ZjFCT(qHDd#30 zenBHjD=UvZTQWwvkN%3nKdHEJoiUKpLeP&Kpu@(0Mr2$3HVvJ6onoZqVE0?-Rt_PV z*gn_i(n^SCmHJw~5x z<*MV_yjmEQ1-z*I{1he^%GYtOFxz*j|Fy(KeQokDc?sXU*K=&QN31JUO<>|@fe z9vQ`vyY6~VmrTrfJ^4k5%kFKdeO_Wc7oc$1xTfIp{!W3rYO^lNzuWC`rM!ge3Cr_ zM{O%nww=<~jZ-an3c|>5gXwfh7Dc%wtCA8iexL3l*A@_6o6$%mM~{=C?6d331qAxO z@8Ci4V}S&GM%^V{C3;<(dSGrwU+l;L+w!}ay-~dg_zQwW(lIIITeW>H@*J%@xjfHRX+{>LFc-S&k zU%I}gpAtN@GY7J9%-P=1qH-Sx82xc%E?k(&aIOVyswOCLALE0;L~a__BW2l~ROXmB zuaTUpa@eG}1lMjbKC_V-p_JSHz02X)0rJ{g3zbc^ycI2*=B{x6@qr>J>ZV2OV7goO z7ci5QH?|)n&mCA+wio&*NsMzidWM#FrvQ|H`}GaMm7V(lt@p-?8}sl5m0QO}NWNgT z^u~bDM)vVu>sGsxAG!i0htRBQ^tLjk6!WHBzP0t`Id6ZT*<{K1@QUZq1`(WN-uH=mCvBN}SdE#f-fC%#9f#r+2_TAJk z25M4irhW?5gjY}7TujqUv3Y{zua2+8jm~76WuAZHAM$M9ps=X_9yKzQek0an@A}=! 
z%+E7>OzScfP%93$j<`*qaRcQ7)>ZVRiO`dxUDfuZ40-aMxxpBW$m3TXuMJO?+i%z; zxJT#ihto@ym+BUiybIZs?DvQcQqJd;M~M^|{JJSq$1{clQB1WTt#Q-B2kt>$E6sS> zG2`NG$Vg+w$!@axF4L4(kFE7>I!%@YSU_hCwW7U81>62el^Ov7aZA>_k5`ZE4obE= z4q)I3ZH;cwIeS@Cap*B67wBz#{rd3d;MN5K$x|X^;6~dP$1G-~W&qCh8o-%){!4u-3) z=3p-F#)j9y3Ukp&oF-g;#}M|3azJ1sK{#2&7o|q>RsS1}&d$ST1mnY>6%y4G5wa4< zRcQ%viWut(Y0_sDjq)gj1|$588OFJM_s5ncgO+&Xb~Aj|b7Emm__c9&fk78)3S4vys}hCf9~cIygQGeve%)X%i2*K*cG z=?5Fhv9aGps6mkLv&Y>VtJgJwo;;V(eAxv?$5Aeszm$>Y9LYm!GjMF!DD@4&u`9@} z>9vCQhfUk>G-_uslpNQtQUqhX54FoWzTM5lOw@}zOOJI7%${J}`kz*1+SWT6g zhGt`Zt9@H6|8uhA@oiFCa_OMWrQHkwB|( z7I?1RGbw6S#He>>xY~r=ZdRyqmciDw5(C>F`(JJppBh^3C`$P$&m1+VHp(qYAI0PB zTl7+ym`Ez-q$;C>m5cL$I^W|YG0fbRO$0wF*0z^NY^5V!oot466+lKV2nF4&YB4p# z_$eKso9v*?>of&BW`(QWa{My_5&j2vaoRH6A753 zrB+^#C@&aDKBad5@gOvGhtj^v5`G*V!H(!MvrlW5caCv$IKyDRJUVOUoo+KFTG5&3 zuj`uZaptZ@LIi1XC5Km>^p-9X`23QS_g797)z1bVdMqsD>}niSyDcY#COd`{ohW|} zF?`3dQ^wGj;-beQAhpl!D^SyOjevKezX-=d}kWtPK(x^%Uab6fdpI#CTb z^o-wpPLQ0BTcE{7QjOAB(Yu=Yf)OcRhvu^y$nRLY1B=C|#^drEUrJdwN5C%(2B6<| zGKG@owxY}83+lhyZMODmKbhYdIiFIPG@PDI4Cc4}m}hA|*tP7skIDy+d93V zt%FoPF)N~NRo(gV%NSm=II|sfA6V|C6@K^v^+v6{bG#@di{{t1aBj$EgB^}WWT)Y2 zzY3ZvzZdvUGcwlaxoaqxRwdBj?SEF#T;fAT&;@g(G;X}RydUz}jy;O4RQrA_DrmQL z>Rf@6M&!v`5{JF*R)sTtYMRi)06vEo@F;Db=sP%?M3U%qhKGjE6&Y8=k0*|oYg**c z@)YZkV?PEFq6||dBbpL_xz0g-ago@3NwCV3nPj0Df#G}`^T zJ6s`HIdxDqIULcxbka|17chTzh%o1?t^1{nzG$GBJX5}ZwtZ2OMbV-FzstL``-o|) zC;&BQHS7+npP1`?%a*%cYp3>|?CEEf#jLOkz*4s8#X)ql#-)nNU)v6q(2D>C+Y|=E zcGCH~MyWxBgdtM~*qawd)OU!;F7>6;goe&PkgAU?2(DJ_R5;D8Mzk(Ivvl3RgG$LM zp_!hJB55`{_FJ6c8!Ijm0D!MbOK+Db1o-&Ra(|3ynHV>hR`zUk;LUAm9IJt0x#n*U7dzZ zaSB#1Z7Kv?_fS*%z6tg}nN2jGI9AW67*}U(nlO3Ug(AEhyP5{e&%8wV@lI~m;-N`) z!kSetxeqGNcchG3?2|dxMwtgfDkp?tPeV9%zHtyy0gvlirCVR{BuSGNs3*iQcRhZc z{9zpojPCc2=D5dM_#|WIEgkmTeaco*cgjIc>j?7p?TG{Zy?uVF#Y8!IFmGU^@5VZM ztzK`1LKRnCH1h)+xoUhcxlQ}}eqMnZQ|{4wtmW>{_92Lxa~B!5PJ!qa;jTWeq-OG* z2MO)6Py}C-J5}oph|fV4)vYtP=699tcVyf~1cwl%M2H31go{U|o@>4nr&rN##A3aR zb_<#=UH6A(_fd55&Lg>xs7_%ci!QAnnv!CHb&SQ-(57w2y zGrC*3b7n|U3DUOHy2N0xKuql6y&&^D@gT6@PXP^UZDsdOT63u7DntZUKy!{(L@r3c zrn5cjHYURsse)|R#jML!ImH@HvA)OK)!r&{+1r~xj|Zr?uhHbs%+1umh6=*N9&eYp zbql-Ra3e{T|G^Irhz`q5sX8q*A>JYr6rAGAKMSsv5rOT{meDI97W{9J20%u--vImH zHy6n(ZDrCHMzwE`B~Yy3F;%-scfL=1QdSJc^o?x$o^AOuafCf_GEG38KX~4161tj} zW{h-i$eP9jYde>8Ke{G~;(Y8t&nvhh1&w>>!;6M<1fSb@A3VEUACcgGMV0(pRS{D% zWbhiO+^TI>%$~u0b32Kmq+cZ9G+jV(0G6FN?4DE3oGi@3c1Nb1kKHbQWNbTHyXbcw zVO<^;ENIrAfs1KXugUVB4&7M~NVm4OnkkF6*JsW?k44P+_IF~1mkIsMvPmf3>6+Vk zaSK1n8)hcXzWi#i2f?r0!T0vBrm{DX18%j*xjFZG%rXqWZKpOi_*7mf22$2-$}~6z z!Q20OZgyB~T$5k5RuutOVAS>mc}13Yn$yycq^yG(k@^ZFnqUnS1+09q0kr7_U8j?a z%gu~Cs}i+u$D>M#o|im7Yh274NNXEBWnbLoYF0RN0PCk^F%O$n9qfBgdcpimk_8t- z&1<*MS$)|p?FXP=$~v347sqzJ7K9S{Qd5iJ28X*SonGI{)|2V8(HvhP<^0deZB=Ix_6z)R!q5TZKyo!3`VF zjR8k*lfqL&nGei6J7jcO)^fAGS zb9XUgVJC#K1T1d%BPY-QY@dzQ{8)FPeY=;KesXzNus;#%$3NU!^3<(mw(u<lPdU8O%~PH}T2M{jJ-F?=oHEZwJCn7fBe<#L z^&%+Z$>7fdjtZlPFxlqkP+ zeJdV*#H(Y%5>Dc-Sf1y&f*>{l zBd1Fi*sv($KE9m-o?M6xKtC)iVx6(;VB#<|a(j)(W!v?b4+&*2WUXr~_C5Ea)f6 z5mJ6L$IHJu;)*V=Dzq@a?xW8g#)v8rBPm@{Dr;qLirYbk+JlkO(UL^ww|kU=#W@o~ zmVeE#@T*V%pgObX6nh;f^;)P|{Q{*;7DUn5ToPEO3!{4Z9IVo5pwx>V=1n*D`dIt( zkw6?SqGeLaH25B>Qk@}kUJkOY%q=OrCr&&LRhU5-zQ>G~Op{3WX4YD8cX?lQZ~Wlo znE=dYKR^qz*ggTtFgzZQuJ&9cLL2B)Rdl$GBW{x=TvRpF-uL*HOtIg&`mAl@oY(n^ zb6V>)HLt_$>ZSYDGSg31mRnzQtafBc?a&I*TeFKB(7c@dVB+_h`U)|5sf)YZN6YEld!mgiB5~prXV$re+BZ+UO}O=nv5MJzE?%NO5|>#LxzC>nCO5_~M3?G( zxzUR&(d#of#nn2`aUE})38CX8>79=A>EDQrn5|7KQXc2xitK}t*`Rw*A=C6ac~|do 
zX4TJRZq9m5H9|`xwt{2*hL*B0#CFD&j<^F@r_e9^t%5iiPx7dYew~Ri}61^?Et=!&pr(gvxC~#krn9JgY6AZ8ITRb28b=M;-eeiXv52j?h z_>S{&xZG8)U&}9Do(Q~oRgR8GgQ;GF*F|)0hw(ei_#5(SjM}*Dhp~f_#(>mz)B(L( z^2w7N*6`0OS`OtHx4sZ4X)M#J+2;_8TT&MZI8Jmr{n-AVFLJp{{laHwcHUo2OVcg0 zatgBPjpC0u336*TISY64A_I2 zOB}5$FTRt#)3goUXZTw2F#qA0O(Ge%N%3eL63~6g z<+BMNa!-sTqi9u& zhGlk{%t_kiSL!=69fU};mzSMAGUD&bq+0KBfBlAdN-8e6j9MZucJWl&Vs24&;DZ5E zMcQHa_25gJaEuVm`tD4HH_#o-5DPv{4p>~vv6B0#Z$$ABzxxUR$Tg`CTct!U1n#FX z|6RR_1@nUvZ#K6rX8)L#aT#)sf884X=x5tGExeCPY-earEbG^esr6d(EZB({a@6_Smuf>< zQtd$3u zGUaz+JHZk)eqh4+r`&w&hvn9mN}w8<>}1zNYaRxWbW%RQJ+PjPOBwKh);BJmg6wII z4h0Y@TKcH(fpD&(dcy??Bdg4V0iufkA(`2#W0pd}o@MlhqXa2f|B%uMFgNxYh$u+pk;y{5cGTg1H}Y+?^y?BG#V4>}J7+xv@J(4zExZ|zCiXt)w3yp4Q8LV{EikM~MBLw(dox8;jotsx{ zvR*@us>Mf6sDuH+{0CavyIh|Ai+=h*OwK;OZB#u~9fD1`!t#iuv(hYRB-Vu;LeksB zN*t}1W75z>Z<7^t8ldO4;(TfRD?sRd+<-KB+8WzggCs^R-|pNq1Hbfl6@tLq*WL_Q#g&d~uI^0)3Ab)IJE_ENj$bpnow`|^v%lrKTe_)v9LDng{q1E6+ z2;XkV`b@w9GD?M+tfX{Vw*03=d6|F=%8Px@q&$iIG*Jc=aW!KaO{PU>0E`A~>>D6P zpH1F-wO`0bGHy#(;K*S&s=-35fuAFmVgn` z2F@OHK}AUz(-|hPy-2gXm%`IRvy^^ms8%y=F(8VnEiv~kJL}Z=1hrpmZmCBa{98Y? z<}RO^)DDJvumW#+(4E-ZE2)P%FYyvfQt&sacWD2eu&a&waYZO8#BkKzJ3vK6eGpd_ z3icn)_Ta%CF`G+qgmME5bHbjxid+J1P1x8uSHyOZ*?0tchYC;GL$~Bs zGX+y(<^A~Q+}zL8v?VRw36b7Ogp3?6PE1FgWG2A@a{;&R=LRf^->K8qeh;CZB)Ht_ zO4}aZbe+Q*bZ2T@H*Rt9aa%SC`_JXomuW;!%g?aSdhZiDm|DvgMNMb_VYIaVFoKXt zGL3C-3#dI!3n|9r27_+2f%KG>T9IhU_v)%tKOf*BjE0?m-x~ws(>EW>yn**}Xt;Z- z|1<{NJW}Lq^=dePYK>m)f*kFzAqndo8p%MLl><}bbb2b6d@zS_mEdltRj7PTcYQt* zpaP@Hsy;3Im%lma=}E{yNVlqwk>hlpR-Buf_l+Bb#3}bQ5Q$7(Sl+)fbe+3!xn)zU z@rkhRzugsd-Ev=}6Z@OsU|GY!8$Qrr1cX|E&is&dGK8i<&I zMbWo-`SbDHPym7mHa{|C84>goEt+`CZPD`Dn6eWPqi_xG@oRewf%taAHlgLRa2zIuw!_l4~RNL0mL!P6r52h?STw;b7DIkU*^$zbhgyxlc|cLC)9K zL;F3vsGQlSc=ag^sE-kRaw$o^B1tPS^Xu*rCH=F0`nsrPzRH#LDCF(Wcc|)mI~5iDLYvNFyCJG{O>Wx6Wh&!JGZ$=lh1SdHzHb$s84e;r2;XbT54)x+We@xnCc*@{xW2cf9)cPgk|Y5aIuMIBz8mMLV)!j*6L%j!@!E+m zufsXdU-lV!_Y+=}C<9z!7jd{v4x_Y6TC{?LxazgZqdRw<4r2B^ZSAO z!Q@P1V!o}f;GtREko6oY0sK37x*#<}U;eRy^viE+Fz3UPYMp8a#;+v>S{K|LAMnFu z|B`^Z|B1D2uuVW^LYnxjR=4o?6>8rvxB$jD(XA;$xNMT73^r(Vqw6BLoC8@IW9($7 zw%^v)0i`0`j!AV&Fe$*ijnQdIL#^H^Zs@-vV$V9?%v8c)f5bae%+o^gcG*~pD@mF` zq+1$?aK`>vaVP$VpWDun3hw^)iqXrv@mB^dB9LAExak)_)u{xtHRj9dNAI&C+5#ur z0jb|5Os#xFjTY-KtEG3;kLhn-!=KQDgLw}0wkSS8%Vpq_FZ7(}Sbg!+q{qtnAnV4z zWlT4w&Ew%c5DbGlwiXKf9Cq=jZ@AFv-w%FIxK_e`0j;%w;ImX~ks+VqC4|CmHnnql zecq@7PlJIOxeREVJMNxcq{|k6N4Q6zoC*BbE9o-#K>&fb1`y;0cgBc23ya!T(8plo zDWuJ}bcB!vjPnEBw-J*{{H&e0>Xi94r)#0#pYz=-3LpRWelX@L44_-e1?KiMsf}mL9$WfAu1z;K?UwwW)~` zU&EBXb;=$~Dxp#G?R`7&_%!Riv zr|XG84glxhxFB4H@qazs#VP(o{D3e^(APe~+)GZgPD-~cw*H1vuiCI$1Oh)*z3A@- zRHZxbNLOl05cEZBfOVm)SW@*O(6Z#OuyaCbOS1TZSc|N^4)Ch)>gOv6C63OXh4|Pk zvoO!RxeM|6FfA>0)^-2LERHM5xL&_>7lO&_jvuTA9XH^eJq`3j*ZU-=w+a)+AmH*Y zOjAc6`Ei&Mrc=qgnVJLZ;3MrG-RS1ljgYaAGD+R-_g||BQ-D+0)9rMR#mUzvLUh(Y z>K)4t4dg7%{-?t)g!=dp7z0FG0oWt6XDMb#CvP#C*Dy9OV9)7Cf1K+ep0>OC{1J0U z6G_%L*J`=_Kqp=_c2OpRbD~O^>;bpU9LshUhtOk1VZr;CQK4A1LR#;vD>nF01@a~0 zx0FDe^&si_t}trYWdndswQN>Rpe2O|Nanb?%@<*8%2L!xoiot($&E+Kh{06njkb?& z>j5W`D%!Ajx5*`hA=-B#cOTjS#?Ag8(SfgfdJ`E^D;t)1TJ4v?du(WtFj3jO>rm;T z>oq@oTWd?{hE@Ptj^mj5$qE4uBZ9G}89AdpH(R&6-_fCryF*r&zrDoe2C$uX{+Y)WA}>Pg=dA%IJJa{c@tq+pqk1IxUgk{ zWVtD%V)3O&P=+4{YuNc`;<5Z{--F#gFq8Pd7=u7Gxr zm&$h4AYof138C#lK~fq%bX+r2_NHMQ*%MvYu@qgE|8VKxBo2}zpV?=HubZz+Cx$M;O0Y4`;6d`>d_q)B+^_wrm}(ef zce>qdGsP+$H}N}T7kXX;^?0@+9_2ztfX1iLm`86gkL2SGf8cImIoLT;|J^9U%L$ZV z7hg5HF_H?cRxMipG?l*;E?T#!Iz2LU%cT{pN($#x)2;R#EOGRN!3VvmD;T{;zjae; z5Gfo?Mioo)Cn)>&Eop!wTjJCzyoZ6l7A1VUwXQH(_cdGnwZNu`^0=EViD8D|`)agc zwY2qOQwtG2bX?`*ZS8rZvQNyYG 
[... base85-encoded binary patch data for a PNG image omitted ...]
zvW?qKFD*hGtjDL1l{*tS_tQ&h^8Wq{kH~?@WT}k5J5lVmtQdso@s(u&#^76b>nu;& zG$PlWh?Vo&r@tGC7dI8;ygwZ5Log7x$Vr^?wh!Pn_#`JGtJj}_fDf_1Tt@QBosr~; zg6sDOF1zQ|UzFuSp%kqGH(`ukOmmL?r1FyraRk7P7dmvFn^r5avLS0>wC~U;jHDVc zZ@)!kLWx;9kwxHBeteE>^wYAO5Z0kBUKQnlht{0R8n$9(y@t4aO;ggPfeTIy2SQI& zimdha+LlR39LC@?o=61t{)jAlo?(~`gY>&N79e_f#+X#vqm<7@p5{Z1cS$N9v;yF9Zkj{Ox+WY{)nI7twBv&S@E^hM2c$ zaA__zO}_>!H+sXR2<)w2zXY*L{HJW0y#9p4ATklk;ti!*kM9&A@x&M#V$yBi_-bxS;E441Z7tr4A0GJKfWW+%lJXCf+ghb>opAeq4BsRn zW{B3IvQRkV@GVk}_uPN-r?hwd2J741%nI7taS(XlGAYa6 zK`fV|YjV7#q?jwlehhsvlv=%;e5CcgnQmzoXLP97yJS&(C_dfq(DUi7^}v`n1P-PF zqfIYUQFu=1DAyk1XG^+95TXp0yo50JT24g;HLJw0qBxm1+F5%FuVftU4@XsN1f z_pux-rLPSW<%NBfvinDr*?h#q1++vN$Up=e1bfz{N2IGrxt*seSKl=D>=;!Ae_5_* z`(zMe9>v>a6q|zRM%r;`W9x@Ex_82s@r=Xab-UOMrhxGeX#Txxh4x`+`d~3wANL>wnj(}u?5%}^`AB)hhn$2xo}*`q|rNxb}$?# zmh5b-8F5w?Dc)jRq$fv8y&9XLoX>%)$7)9Hizh{I-GU!(g(&GF67?2uv47)_4W_iV z=3g%S`6KKnw_qL`N=T{eO>Av~JN=sarU60|Njwm+?f}N8UG)0|+~)|U|HI_?0xT#Y z87Lc^O|w$wc{PxN;kH)sWwUB_1|uqiH)EGW79S6vGVTK+icfM%*~Wb9SE*iD-RSUl zF-cYG=r7ZK5q3hultH8*MrQCxg(0Y@{1ce1S_Vh7!JkY}{kK$-8Qu zotkpXKuif#8C^Bi>AD6(8QzpR>1l1Z135xK7#jBS02Q#zgCiMp{AY3! zhW`7yY=OwlqgD|)Em1l>sRmP#E#NadurtvA<%>cD$?bu71k+riT}UUzL995D9&osfS9UfED~6g#Tb9dE&MCF>z2yM=_;IZL#L$ZqzXEcCJ0X> zSKcVyvsB^9XJsd$qxEO8sX?pzOb{f@*x{#*rZI;?6-*R>B$8YUgXl zsN#M9z+Qb!vXE4!n01Uhq5y+LOtRR^YqKzlc6vPB5R4ys@H)BDNCVVB0B5B~_0Ms{ z42*J{8;XS=LV00CA(Pr}^#sb=j1YocUJ8*sso(Q)y{tfmgWKVlDb)FE7}NCo?aF+c zneU{e(67XdMGouA-pZ6M?mPjpMNl{s_NbP{dO+bb>3 zn3|Cf`yIGDZ&jI3mvY_XOVtKMTAddk;X~c?SLzo6uH4dZ@$>CqD2SDpW7){oOzyEw z0(H~SI#Yce$JtxsR{5hzq%3xw0{CB@k`-|lL88$M+Vbr-6JKIK^jAXlATX~}c7?Mr z2XRUN+HmXFUm`NadXdUeHTH8JT}qR)=m&I^gAYy?0)D;{k?~_KFQ@;d-VD$jR;hEt zqOqOvd%u~MQH4rH4SELWHzg0H(t7}WcgZX(g@%*=ddf!r&~&{k?q^_*+8)`~tkqxp zy|89cEB&6bEu9`vPL1y1jO8V)M1n)K(#^jGCfTzY4p2NSdWfuPWs@Dm3I~YfU4}g( z%Cs2L1b#Z;sQ=j3Gq+|Pv5u~33=mqPO`(Wy+$m!O6JI7l60_e0T7^dtuq7l|_F~VE zEz_Cdb!g(CKVl>2*N&K`zg*D zscSUaDrAM`qH@>}EiOG5Rt)1wrOzt`T1AKP!Fk*Y;tDbVg-+dsmT!UwwRZNf zK!KQcPA97e*$UZheq)Pstj2r;FESa4RlGlB#)K`wy-{QLjBu4zFM@aeVg#sP_vt9J z|J`#==tUkj2R3pajV79%z4*DAk%xaWpC1k8UELG)f6BWVzgBUjYO@)!XN^#*S%#ON z*>>miwS4_0pi{esE8F@>i}#zI>Fz(J2jg-`5IXl2(_B5SK32yiVG9^M{%2i@rDFnB z#h1uw3u_D#N5p8o*QJd3(*#2O9v&q-d}&dEt)X)An>AFA6E{Tf)z9H!^|hRY85;u> zin|I!b!=$f`1S4Pfn~MOxd|GJr4FPAo*P%2vB3CdxnSTF5ORFq=|^yqGql`oUTcSq zIw>XKs}`S;X+oozSmOOLDGkHLAa6fsIsH1NG5R6Qd-{uPUKB+~D5Z}kQ`w4FaqDw?UL087P_|6d7c&n^_xS0@y#%$^X-{Y|B@DxxPnUl|2w)cS z&lki-Ncl>Fpjv0!ukr#V&Er0EmDh)F9zbP#mrr2H&_^ba%3Wg1^?S#J*TWRknVdxA zH*9hF1Ad$`{(_j@Yp>al@(5oz5nC?~fucUSqoWX-SClIkOLy0JtNtqSdP)1u(wlFT z8BPb|?~@_q&(vTzfGq^f8EF?=OL$~~4vU6jbvoN75XIUD@haE(muxH?#THd>^cd@o zxP7|sZuxIXxG1@lf-hnCbc7qvb!Q14C2iMv_IXmwN3i}3uwh&O zRv|ALoMw2=r==yFlmu4-xJ=d*zFj15=F?!TOWbdkQLgfZ^F|t{;9dw;y6ly)zh}vVSM4VV|;~*Bn+bb^kbJD?H5gz zH;~z)%sUY}nzK(sL}j(0CF4cnerey>?|qfpq}r8gcFi0$yHUO2M2mRmvx0L~j!js_ zE!?X9uUi_p>OqCZmfMH7-i*g>=No52(*@{1)9KL)`cxSm=(y=62P|vz`SM+#MSCBp zp+Zlct@pcm3b=W$!z%DS)r~z1Wxjm0kdi1Nd$}G7rtb9{os*hMoXuLiZg)_|pGF;0 zxlr#OG_qS_J6IbhF4=W?Gw5+^)I~d@=8Ymj!pNG)0(k;4$w$w!`1oF;+4)6g#t$n6 zc)(ypA_5=s`as5)S6**d{gspIg{u>7X(jxOcUe!1$Ea;2wJ@3~g^QjibK3)G;vN7Exa0ljUw^&+2XgQ$4Ca!XwBoQ;wp zx(-gH^{50j&tqrA7!&K*F*Fv=(#FeV?Qa$hR7DfHM@>RtxI*Z^2~DJaz`Tqjb(56N=*nne+Od(tQ+_cChxn7n9qxDpH-7nYHX#K5`753(a z`@8rfHT-bb6(L&Mi)!DfXCXyC20cw(*+=`fc{FHTSls^e4zFR#&vSVTuG3vSA7LlL zSZt-UEo#IDA&v6V4TG*`W~64t?3L33gSTsqrVN}~ZB0`pJQaxNLPIGO()jb!NO2x; zHVxa-z*Aba^ow^OBr%%p@*>%r1BOEDLmUQ0##2fKmYCrtmq9tI19db!Y2s%FggAEg zZse-`Rml96>-@M%D3oNbWeVht)b=MPAGu<^3#>+6X+P~SeEm83G?0x{LpFgzo)M%c 
From 8dd87491dcb2660f859f87b3336adb984c62c12d Mon Sep 17 00:00:00 2001
From: Robert Bradshaw
Date: Fri, 21 Oct 2022 14:19:32 -0700
Subject: [PATCH 021/115] Avoid pickling unstable reference to moved proto classes. (#23739)

CloudPickle notices that schema_pb2 (from the closure of the locally defined
__reduce__ method) is not importable under its declared name
(org.apache.beam...) and tries to pickle it (which fails due to proto classes
themselves being unpicklable). This avoids that error by moving it out.
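As illustration only, a minimal sketch of the failure mode and of the fix
pattern, under assumed names: `rebuild`, `bad_factory`, `good_factory`, and
`schema_msg` are hypothetical stand-ins, not the actual Beam symbols.
cloudpickle serializes whatever a nested function closes over, so a closure
that captures only plain bytes never drags the generated proto module into
the pickle:

    # Hypothetical sketch, not the actual Beam code.
    def rebuild(serialized_schema, values):
        ...  # reconstruct an instance from schema bytes plus field values

    def bad_factory(schema_msg):
        # This closure captures schema_msg, a proto message, so cloudpickle
        # also tries to serialize its generated module by value; that fails
        # when the module is not importable under its declared name.
        def __reduce__(self):
            return rebuild, (schema_msg.SerializeToString(), tuple(self))
        return __reduce__

    def good_factory(serialized_schema):
        # This closure captures only bytes, which pickle cleanly.
        def __reduce__(self):
            return rebuild, (serialized_schema, tuple(self))
        return __reduce__

Defining the factory at module level and passing it pre-serialized bytes is
the design choice this patch applies in schemas.py.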
--- sdks/python/apache_beam/typehints/schemas.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/sdks/python/apache_beam/typehints/schemas.py b/sdks/python/apache_beam/typehints/schemas.py index 4371ca2de0da..9f57700cccd6 100644 --- a/sdks/python/apache_beam/typehints/schemas.py +++ b/sdks/python/apache_beam/typehints/schemas.py @@ -463,12 +463,10 @@ def named_tuple_from_schema(self, schema: schema_pb2.Schema) -> type: # Define a reduce function, otherwise these types can't be pickled # (See BEAM-9574) - def __reduce__(self): - return ( - _hydrate_namedtuple_instance, - (schema.SerializeToString(), tuple(self))) - - setattr(user_type, '__reduce__', __reduce__) + setattr( + user_type, + '__reduce__', + _named_tuple_reduce_method(schema.SerializeToString())) self.schema_registry.add(user_type, schema) coders.registry.register_coder(user_type, coders.RowCoder) @@ -476,6 +474,13 @@ def __reduce__(self): return user_type +def _named_tuple_reduce_method(serialized_schema): + def __reduce__(self): + return _hydrate_namedtuple_instance, (serialized_schema, tuple(self)) + + return __reduce__ + + def _hydrate_namedtuple_instance(encoded_schema, values): return named_tuple_from_schema( proto_utils.parse_Bytes(encoded_schema, schema_pb2.Schema))(*values) From aadbe31e975e81508f8a8bc9765928ea9d93520f Mon Sep 17 00:00:00 2001 From: Robert Bradshaw Date: Fri, 21 Oct 2022 14:25:17 -0700 Subject: [PATCH 022/115] Unskip test_generated_class_pickle for cloudpickle. --- sdks/python/apache_beam/typehints/schemas_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sdks/python/apache_beam/typehints/schemas_test.py b/sdks/python/apache_beam/typehints/schemas_test.py index 5d9434345cbc..831128afafdb 100644 --- a/sdks/python/apache_beam/typehints/schemas_test.py +++ b/sdks/python/apache_beam/typehints/schemas_test.py @@ -633,8 +633,10 @@ def test_generated_class_pickle_instance(self): self.assertEqual(instance, self.pickler.loads(self.pickler.dumps(instance))) - @unittest.skip("https://github.com/apache/beam/issues/22714") def test_generated_class_pickle(self): + if self.pickler in [pickle, dill]: + self.skipTest('https://github.com/apache/beam/issues/22714') + schema = schema_pb2.Schema( id="some-uuid", fields=[ From 0753d468812f02cb752f6a8129dbf4c271330782 Mon Sep 17 00:00:00 2001 From: Kenneth Knowles Date: Fri, 21 Oct 2022 15:09:42 -0700 Subject: [PATCH 023/115] Enable checkerframework by default --- .../groovy/org/apache/beam/gradle/BeamModulePlugin.groovy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy index 1f1fe4589ffc..762ee7b3b3b2 100644 --- a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy +++ b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy @@ -981,10 +981,10 @@ class BeamModulePlugin implements Plugin { 'org.checkerframework.checker.nullness.NullnessChecker' ] - if (parseBooleanProperty(project, 'enableCheckerFramework') || project.jenkins.isCIBuild) { - skipCheckerFramework = false - } else { + if (!parseBooleanProperty(project, 'enableCheckerFramework') && !project.jenkins.isCIBuild) { skipCheckerFramework = true + } else { + skipCheckerFramework = false } // Always exclude checkerframework on tests. 
It's slow, and it often From 8f300b07399d5d868f90646de7b4cf31fd6c58f4 Mon Sep 17 00:00:00 2001 From: Robert Bradshaw Date: Fri, 21 Oct 2022 16:39:08 -0700 Subject: [PATCH 024/115] Allow local packages in requirements.txt dependency list. (#23684) We pull these out and install them with the extra local packages infrastructure. --- CHANGES.md | 9 ++++ .../apache_beam/runners/portability/stager.py | 26 ++++++++++- .../runners/portability/stager_test.py | 43 +++++++++++++++++++ .../sdks/python-pipeline-dependencies.md | 3 +- 4 files changed, 78 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1df3cb35b2bf..22cfe3f93d83 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -50,6 +50,15 @@ * ([#X](https://github.com/apache/beam/issues/X)). --> +# [2.44.0] - Unreleased + +## New Features / Improvements + +* Local packages can now be used as dependencies in the requirements.txt file, rather + than requiring them to be passed separately via the `--extra_package` option. + ([#23684](https://github.com/apache/beam/pull/23684)) + + # [2.43.0] - Unreleased ## Highlights diff --git a/sdks/python/apache_beam/runners/portability/stager.py b/sdks/python/apache_beam/runners/portability/stager.py index e06c71c917d2..abcef4679c20 100644 --- a/sdks/python/apache_beam/runners/portability/stager.py +++ b/sdks/python/apache_beam/runners/portability/stager.py @@ -224,9 +224,14 @@ def create_job_resources(options, # type: PipelineOptions 'The file %s cannot be found. It was specified in the ' '--requirements_file command line option.' % setup_options.requirements_file) + extra_packages, thinned_requirements_file = ( + Stager._extract_local_packages(setup_options.requirements_file)) + if extra_packages: + setup_options.extra_packages = ( + setup_options.extra_packages or []) + extra_packages resources.append( Stager._create_file_stage_to_artifact( - setup_options.requirements_file, REQUIREMENTS_FILE)) + thinned_requirements_file, REQUIREMENTS_FILE)) # Populate cache with packages from the requirement file option and # stage the files in the cache. 
if not use_beam_default_container: @@ -683,6 +688,25 @@ def _remove_dependency_from_requirements( return tmp_requirements_filename + @staticmethod + def _extract_local_packages(requirements_file): + local_deps = [] + pypi_deps = [] + with open(requirements_file, 'r') as fin: + for line in fin: + dep = line.strip() + if os.path.exists(dep): + local_deps.append(dep) + else: + pypi_deps.append(dep) + if local_deps: + with tempfile.NamedTemporaryFile(suffix='-requirements.txt', + delete=False) as fout: + fout.write('\n'.join(pypi_deps).encode('utf-8')) + return local_deps, fout.name + else: + return [], requirements_file + @staticmethod def _get_platform_for_default_sdk_container(): """ diff --git a/sdks/python/apache_beam/runners/portability/stager_test.py b/sdks/python/apache_beam/runners/portability/stager_test.py index b221bb1ec6f6..c1806c384941 100644 --- a/sdks/python/apache_beam/runners/portability/stager_test.py +++ b/sdks/python/apache_beam/runners/portability/stager_test.py @@ -832,6 +832,49 @@ def test_populate_requirements_cache_with_sdist(self): self.assertTrue('.tar.gz' in f) self.assertTrue('.whl' not in f) + def test_populate_requirements_cache_with_local_files(self): + staging_dir = self.make_temp_dir() + requirements_cache_dir = self.make_temp_dir() + source_dir = self.make_temp_dir() + pkg_dir = self.make_temp_dir() + + options = PipelineOptions() + self.update_options(options) + + options.view_as(SetupOptions).requirements_cache = requirements_cache_dir + options.view_as(SetupOptions).requirements_file = os.path.join( + source_dir, stager.REQUIREMENTS_FILE) + local_package = os.path.join(pkg_dir, 'local_package.tar.gz') + self.create_temp_file(local_package, 'local-package-content') + self.create_temp_file( + os.path.join(source_dir, stager.REQUIREMENTS_FILE), + '\n'.join(['fake_pypi', local_package])) + with mock.patch('apache_beam.runners.portability.stager_test' + '.stager.Stager._populate_requirements_cache', + staticmethod(self._populate_requitements_cache_fake)): + options.view_as(SetupOptions).requirements_cache_only_sources = True + resources = self.stager.create_and_stage_job_resources( + options, staging_location=staging_dir)[1] + + self.assertEqual( + sorted([ + stager.REQUIREMENTS_FILE, + stager.EXTRA_PACKAGES_FILE, + 'nothing.tar.gz', + 'local_package.tar.gz' + ]), + sorted(resources)) + + with open(os.path.join(staging_dir, stager.REQUIREMENTS_FILE)) as fin: + requirements_contents = fin.read() + self.assertIn('fake_pypi', requirements_contents) + self.assertNotIn('local_package', requirements_contents) + + with open(os.path.join(staging_dir, stager.EXTRA_PACKAGES_FILE)) as fin: + extra_packages_contents = fin.read() + self.assertNotIn('fake_pypi', extra_packages_contents) + self.assertIn('local_package', extra_packages_contents) + class TestStager(stager.Stager): def stage_artifact(self, local_path_to_artifact, artifact_name, sha256): diff --git a/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md b/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md index bf2e44e55866..330a8af8e449 100644 --- a/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md +++ b/website/www/site/content/en/documentation/sdks/python-pipeline-dependencies.md @@ -36,7 +36,7 @@ If your pipeline uses public packages from the [Python Package Index](https://py This command creates a `requirements.txt` file that lists all packages that are installed on your machine, regardless of where they were installed from. 
-2. Edit the `requirements.txt` file and leave only the packages that were installed from PyPI and are used in the workflow source. Delete all packages that are not relevant to your code. +2. Edit the `requirements.txt` file and delete all packages that are not relevant to your code. 3. Run your pipeline with the following command-line option: @@ -44,7 +44,6 @@ If your pipeline uses public packages from the [Python Package Index](https://py The runner will use the `requirements.txt` file to install your additional dependencies onto the remote workers. -**Important:** Remote workers will install all packages listed in the `requirements.txt` file. Because of this, it's very important that you delete non-PyPI packages from the `requirements.txt` file, as stated in step 2. If you don't remove non-PyPI packages, the remote workers will fail when attempting to install packages from sources that are unknown to them. > **NOTE**: An alternative to `pip freeze` is to use a library like [pip-tools](https://github.com/jazzband/pip-tools) to compile all the dependencies required for the pipeline from a `--requirements_file`, where only top-level dependencies are mentioned. ## Custom Containers {#custom-containers} From 01da3fcb3e312ea0b5a62bd67b1b221074105a70 Mon Sep 17 00:00:00 2001 From: reuvenlax Date: Fri, 21 Oct 2022 19:40:56 -0700 Subject: [PATCH 025/115] Revert "Update BQIO to a single scheduled executor service reduce threads (#23234)" (#23793) This reverts commit 8e2431c0e55237af4bd00a9786e4c150e20d4e14. --- .../beam/sdk/options/ExecutorOptions.java | 59 ------------------- .../extensions/gcp/options/GcsOptions.java | 29 +++++---- .../io/gcp/bigquery/BigQueryServicesImpl.java | 27 --------- 3 files changed, 17 insertions(+), 98 deletions(-) delete mode 100644 sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java deleted file mode 100644 index 2037d2174226..000000000000 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.beam.sdk.options; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import java.util.concurrent.ScheduledExecutorService; -import org.apache.beam.sdk.util.UnboundedScheduledExecutorService; - -/** - * Options for configuring the {@link ScheduledExecutorService} used throughout the Java runtime. 
- */ -public interface ExecutorOptions extends PipelineOptions { - - /** - * The {@link ScheduledExecutorService} instance to use to create threads, can be overridden to - * specify a {@link ScheduledExecutorService} that is compatible with the user's environment. If - * unset, the default is to create an {@link UnboundedScheduledExecutorService}. - */ - @JsonIgnore - @Description( - "The ScheduledExecutorService instance to use to create threads, can be overridden to specify " - + "a ScheduledExecutorService that is compatible with the user's environment. If unset, " - + "the default is to create an UnboundedScheduledExecutorService.") - @Default.InstanceFactory(ScheduledExecutorServiceFactory.class) - @Hidden - ScheduledExecutorService getScheduledExecutorService(); - - void setScheduledExecutorService(ScheduledExecutorService value); - - /** Returns the default {@link ScheduledExecutorService} to use within the Apache Beam SDK. */ - class ScheduledExecutorServiceFactory implements DefaultValueFactory { - @Override - public ScheduledExecutorService create(PipelineOptions options) { - /* The SDK requires an unbounded thread pool because a step may create X writers - * each requiring their own thread to perform the writes otherwise a writer may - * block causing deadlock for the step because the writers buffer is full. - * Also, the MapTaskExecutor launches the steps in reverse order and completes - * them in forward order thus requiring enough threads so that each step's writers - * can be active. - */ - return new UnboundedScheduledExecutorService(); - } - } -} diff --git a/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java b/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java index fea7be7f5c72..0b14b244da5e 100644 --- a/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java +++ b/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java @@ -29,10 +29,10 @@ import org.apache.beam.sdk.options.Default; import org.apache.beam.sdk.options.DefaultValueFactory; import org.apache.beam.sdk.options.Description; -import org.apache.beam.sdk.options.ExecutorOptions; import org.apache.beam.sdk.options.Hidden; import org.apache.beam.sdk.options.PipelineOptions; import org.apache.beam.sdk.util.InstanceBuilder; +import org.apache.beam.sdk.util.UnboundedScheduledExecutorService; import org.checkerframework.checker.nullness.qual.Nullable; /** Options used to configure Google Cloud Storage. */ @@ -48,22 +48,20 @@ public interface GcsOptions extends ApplicationNameOptions, GcpOptions, Pipeline /** * The ExecutorService instance to use to create threads, can be overridden to specify an - * ExecutorService that is compatible with the user's environment. If unset, the default is to use - * {@link ExecutorOptions#getScheduledExecutorService()}. - * - * @deprecated use {@link ExecutorOptions#getScheduledExecutorService()} instead + * ExecutorService that is compatible with the user's environment. If unset, the default is to + * create an ExecutorService with an unbounded number of threads; this is compatible with Google + * AppEngine. */ @JsonIgnore + @Description( + "The ExecutorService instance to use to create multiple threads. Can be overridden " + + "to specify an ExecutorService that is compatible with the user's environment. 
If unset, " + + "the default is to create an ExecutorService with an unbounded number of threads; this " + + "is compatible with Google AppEngine.") @Default.InstanceFactory(ExecutorServiceFactory.class) @Hidden - @Deprecated ExecutorService getExecutorService(); - /** - * @deprecated use {@link ExecutorOptions#setScheduledExecutorService} instead. If set, it may - * result in multiple ExecutorServices, and therefore thread pools, in the runtime. - */ - @Deprecated void setExecutorService(ExecutorService value); /** GCS endpoint to use. If unspecified, uses the default endpoint. */ @@ -134,7 +132,14 @@ public interface GcsOptions extends ApplicationNameOptions, GcpOptions, Pipeline class ExecutorServiceFactory implements DefaultValueFactory { @Override public ExecutorService create(PipelineOptions options) { - return options.as(ExecutorOptions.class).getScheduledExecutorService(); + /* The SDK requires an unbounded thread pool because a step may create X writers + * each requiring their own thread to perform the writes otherwise a writer may + * block causing deadlock for the step because the writers buffer is full. + * Also, the MapTaskExecutor launches the steps in reverse order and completes + * them in forward order thus requiring enough threads so that each step's writers + * can be active. + */ + return new UnboundedScheduledExecutorService(); } } diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServicesImpl.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServicesImpl.java index 9df75a5be943..7702538de1e3 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServicesImpl.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServicesImpl.java @@ -29,7 +29,6 @@ import com.google.api.client.util.ExponentialBackOff; import com.google.api.client.util.Sleeper; import com.google.api.core.ApiFuture; -import com.google.api.gax.core.ExecutorProvider; import com.google.api.gax.core.FixedCredentialsProvider; import com.google.api.gax.rpc.ApiException; import com.google.api.gax.rpc.FixedHeaderProvider; @@ -106,7 +105,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -122,7 +120,6 @@ import org.apache.beam.sdk.extensions.gcp.util.Transport; import org.apache.beam.sdk.metrics.Counter; import org.apache.beam.sdk.metrics.Metrics; -import org.apache.beam.sdk.options.ExecutorOptions; import org.apache.beam.sdk.options.PipelineOptions; import org.apache.beam.sdk.transforms.SerializableFunction; import org.apache.beam.sdk.util.FluentBackoff; @@ -1488,36 +1485,12 @@ private static BigQueryWriteClient newBigQueryWriteClient(BigQueryOptions option return BigQueryWriteClient.create( BigQueryWriteSettings.newBuilder() .setCredentialsProvider(() -> options.as(GcpOptions.class).getGcpCredential()) - .setBackgroundExecutorProvider(new OptionsExecutionProvider(options)) .build()); } catch (Exception e) { throw new RuntimeException(e); } } - /** - * OptionsExecutionProvider is a utility class used to wrap the Pipeline-wide {@link - * ScheduledExecutorService} into a supplier for the {@link BigQueryWriteClient}. 
- */ - private static class OptionsExecutionProvider implements ExecutorProvider { - - private final BigQueryOptions options; - - public OptionsExecutionProvider(BigQueryOptions options) { - this.options = options; - } - - @Override - public boolean shouldAutoClose() { - return false; - } - - @Override - public ScheduledExecutorService getExecutor() { - return options.as(ExecutorOptions.class).getScheduledExecutorService(); - } - } - public static CustomHttpErrors createBigQueryClientCustomErrors() { CustomHttpErrors.Builder builder = new CustomHttpErrors.Builder(); // 403 errors, to list tables, matching this URL: From 37763c99b8e6ccff88b3edafe668481100cdc68a Mon Sep 17 00:00:00 2001 From: Chamikara Jayalath Date: Fri, 21 Oct 2022 20:19:38 -0700 Subject: [PATCH 026/115] Updates Python test expansion service to use Cloud Pickle (#23786) * Updates Python test expansion service to use Cloud Pickle * Set the pipeline option at the correct location * Fix yapf --- .../runners/portability/expansion_service_test.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/sdks/python/apache_beam/runners/portability/expansion_service_test.py b/sdks/python/apache_beam/runners/portability/expansion_service_test.py index e99a7aa90c7c..7aa2e5f16e5b 100644 --- a/sdks/python/apache_beam/runners/portability/expansion_service_test.py +++ b/sdks/python/apache_beam/runners/portability/expansion_service_test.py @@ -376,6 +376,8 @@ def cleanup(unused_signum, unused_frame): def main(unused_argv): + # TODO: use the regular expansion service (expansion_service_main) instead of + # this custom service for testing. PyPIArtifactRegistry.register_artifact('beautifulsoup4', '>=4.9,<5.0') parser = argparse.ArgumentParser() parser.add_argument( @@ -388,8 +390,14 @@ def main(unused_argv): options.fully_qualified_name_glob): server = grpc.server(thread_pool_executor.shared_unbounded_instance()) expansion_servicer = expansion_service.ExpansionServiceServicer( - PipelineOptions( - ["--experiments", "beam_fn_api", "--sdk_location", "container"])) + PipelineOptions([ + "--experiments", + "beam_fn_api", + "--sdk_location", + "container", + "--pickle_library", + "cloudpickle" + ])) update_sklearn_model_dependency(expansion_servicer._default_environment) beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server( expansion_servicer, server) From d38f577624eaf8b5f4e31fec43ca8cfba132a132 Mon Sep 17 00:00:00 2001 From: reuvenlax Date: Sat, 22 Oct 2022 00:09:20 -0700 Subject: [PATCH 027/115] Merge pull request #23795: Revert 23234: issue #23794 --- .../beam/sdk/options/ExecutorOptions.java | 59 +++++++++++++++++++ .../extensions/gcp/options/GcsOptions.java | 29 ++++----- 2 files changed, 71 insertions(+), 17 deletions(-) create mode 100644 sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java new file mode 100644 index 000000000000..2037d2174226 --- /dev/null +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ExecutorOptions.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.beam.sdk.options; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.concurrent.ScheduledExecutorService; +import org.apache.beam.sdk.util.UnboundedScheduledExecutorService; + +/** + * Options for configuring the {@link ScheduledExecutorService} used throughout the Java runtime. + */ +public interface ExecutorOptions extends PipelineOptions { + + /** + * The {@link ScheduledExecutorService} instance to use to create threads, can be overridden to + * specify a {@link ScheduledExecutorService} that is compatible with the user's environment. If + * unset, the default is to create an {@link UnboundedScheduledExecutorService}. + */ + @JsonIgnore + @Description( + "The ScheduledExecutorService instance to use to create threads, can be overridden to specify " + + "a ScheduledExecutorService that is compatible with the user's environment. If unset, " + + "the default is to create an UnboundedScheduledExecutorService.") + @Default.InstanceFactory(ScheduledExecutorServiceFactory.class) + @Hidden + ScheduledExecutorService getScheduledExecutorService(); + + void setScheduledExecutorService(ScheduledExecutorService value); + + /** Returns the default {@link ScheduledExecutorService} to use within the Apache Beam SDK. */ + class ScheduledExecutorServiceFactory implements DefaultValueFactory { + @Override + public ScheduledExecutorService create(PipelineOptions options) { + /* The SDK requires an unbounded thread pool because a step may create X writers + * each requiring their own thread to perform the writes otherwise a writer may + * block causing deadlock for the step because the writers buffer is full. + * Also, the MapTaskExecutor launches the steps in reverse order and completes + * them in forward order thus requiring enough threads so that each step's writers + * can be active. 
+ */ + return new UnboundedScheduledExecutorService(); + } + } +} diff --git a/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java b/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java index 0b14b244da5e..fea7be7f5c72 100644 --- a/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java +++ b/sdks/java/extensions/google-cloud-platform-core/src/main/java/org/apache/beam/sdk/extensions/gcp/options/GcsOptions.java @@ -29,10 +29,10 @@ import org.apache.beam.sdk.options.Default; import org.apache.beam.sdk.options.DefaultValueFactory; import org.apache.beam.sdk.options.Description; +import org.apache.beam.sdk.options.ExecutorOptions; import org.apache.beam.sdk.options.Hidden; import org.apache.beam.sdk.options.PipelineOptions; import org.apache.beam.sdk.util.InstanceBuilder; -import org.apache.beam.sdk.util.UnboundedScheduledExecutorService; import org.checkerframework.checker.nullness.qual.Nullable; /** Options used to configure Google Cloud Storage. */ @@ -48,20 +48,22 @@ public interface GcsOptions extends ApplicationNameOptions, GcpOptions, Pipeline /** * The ExecutorService instance to use to create threads, can be overridden to specify an - * ExecutorService that is compatible with the user's environment. If unset, the default is to - * create an ExecutorService with an unbounded number of threads; this is compatible with Google - * AppEngine. + * ExecutorService that is compatible with the user's environment. If unset, the default is to use + * {@link ExecutorOptions#getScheduledExecutorService()}. + * + * @deprecated use {@link ExecutorOptions#getScheduledExecutorService()} instead */ @JsonIgnore - @Description( - "The ExecutorService instance to use to create multiple threads. Can be overridden " - + "to specify an ExecutorService that is compatible with the user's environment. If unset, " - + "the default is to create an ExecutorService with an unbounded number of threads; this " - + "is compatible with Google AppEngine.") @Default.InstanceFactory(ExecutorServiceFactory.class) @Hidden + @Deprecated ExecutorService getExecutorService(); + /** + * @deprecated use {@link ExecutorOptions#setScheduledExecutorService} instead. If set, it may + * result in multiple ExecutorServices, and therefore thread pools, in the runtime. + */ + @Deprecated void setExecutorService(ExecutorService value); /** GCS endpoint to use. If unspecified, uses the default endpoint. */ @@ -132,14 +134,7 @@ public interface GcsOptions extends ApplicationNameOptions, GcpOptions, Pipeline class ExecutorServiceFactory implements DefaultValueFactory { @Override public ExecutorService create(PipelineOptions options) { - /* The SDK requires an unbounded thread pool because a step may create X writers - * each requiring their own thread to perform the writes otherwise a writer may - * block causing deadlock for the step because the writers buffer is full. - * Also, the MapTaskExecutor launches the steps in reverse order and completes - * them in forward order thus requiring enough threads so that each step's writers - * can be active. 
- */ - return new UnboundedScheduledExecutorService(); + return options.as(ExecutorOptions.class).getScheduledExecutorService(); } } From 8df6f67c65b4888c45c31e088fb463972c4ec76b Mon Sep 17 00:00:00 2001 From: reuvenlax Date: Sat, 22 Oct 2022 10:37:18 -0700 Subject: [PATCH 028/115] Merge pull request #23556: Forward failed storage-api row inserts to the failedStorageApiInserts PCollection addresses #23628 --- .../beam/gradle/BeamModulePlugin.groovy | 2 +- .../sdk/io/gcp/bigquery/BigQueryOptions.java | 6 + .../sdk/io/gcp/bigquery/StorageApiLoads.java | 100 ++--- .../StorageApiWriteRecordsInconsistent.java | 50 ++- .../StorageApiWriteUnshardedRecords.java | 277 +++++++++++--- .../StorageApiWritesShardedRecords.java | 342 ++++++++++++------ .../sdk/io/gcp/testing/BigqueryClient.java | 4 +- .../io/gcp/testing/FakeDatasetService.java | 32 +- .../io/gcp/bigquery/BigQueryIOWriteTest.java | 21 +- .../gcp/bigquery/BigQueryNestedRecordsIT.java | 5 +- .../bigquery/StorageApiSinkFailedRowsIT.java | 266 ++++++++++++++ .../bigquery/TableRowToStorageApiProtoIT.java | 8 +- 12 files changed, 865 insertions(+), 248 deletions(-) create mode 100644 sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiSinkFailedRowsIT.java diff --git a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy index 1f1fe4589ffc..7f6ac755d6b6 100644 --- a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy +++ b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy @@ -603,7 +603,7 @@ class BeamModulePlugin implements Plugin { google_cloud_pubsub : "com.google.cloud:google-cloud-pubsub", // google_cloud_platform_libraries_bom sets version google_cloud_pubsublite : "com.google.cloud:google-cloud-pubsublite", // google_cloud_platform_libraries_bom sets version // The GCP Libraries BOM dashboard shows the versions set by the BOM: - // https://storage.googleapis.com/cloud-opensource-java-dashboard/com.google.cloud/libraries-bom/25.2.0/artifact_details.html + // https://storage.googleapis.com/cloud-opensource-java-dashboard/com.google.cloud/libraries-bom/26.1.3/artifact_details.html // Update libraries-bom version on sdks/java/container/license_scripts/dep_urls_java.yaml google_cloud_platform_libraries_bom : "com.google.cloud:libraries-bom:26.1.3", google_cloud_spanner : "com.google.cloud:google-cloud-spanner", // google_cloud_platform_libraries_bom sets version diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryOptions.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryOptions.java index 953d1237d9c9..53cb27136412 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryOptions.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryOptions.java @@ -150,4 +150,10 @@ public interface BigQueryOptions Integer getStorageApiAppendThresholdRecordCount(); void setStorageApiAppendThresholdRecordCount(Integer value); + + @Description("Maximum request size allowed by the storage write API. 
") + @Default.Long(10 * 1000 * 1000) + Long getStorageWriteApiMaxRequestSize(); + + void setStorageWriteApiMaxRequestSize(Long value); } diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiLoads.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiLoads.java index e48b9a196902..20ab251c9c0c 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiLoads.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiLoads.java @@ -24,6 +24,7 @@ import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO.Write.CreateDisposition; import org.apache.beam.sdk.schemas.NoSuchSchemaException; import org.apache.beam.sdk.transforms.DoFn; +import org.apache.beam.sdk.transforms.Flatten; import org.apache.beam.sdk.transforms.GroupIntoBatches; import org.apache.beam.sdk.transforms.PTransform; import org.apache.beam.sdk.transforms.ParDo; @@ -32,6 +33,7 @@ import org.apache.beam.sdk.util.ShardedKey; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PCollection; +import org.apache.beam.sdk.values.PCollectionList; import org.apache.beam.sdk.values.PCollectionTuple; import org.apache.beam.sdk.values.TupleTag; import org.joda.time.Duration; @@ -101,7 +103,7 @@ public WriteResult expandInconsistent( PCollection> inputInGlobalWindow = input.apply("rewindowIntoGlobal", Window.into(new GlobalWindows())); - PCollectionTuple convertedRecords = + PCollectionTuple convertMessagesResult = inputInGlobalWindow .apply( "CreateTables", @@ -116,20 +118,23 @@ public WriteResult expandInconsistent( successfulRowsTag, BigQueryStorageApiInsertErrorCoder.of(), successCoder)); - convertedRecords - .get(successfulRowsTag) - .apply( - "StorageApiWriteInconsistent", - new StorageApiWriteRecordsInconsistent<>(dynamicDestinations, bqServices)); + PCollectionTuple writeRecordsResult = + convertMessagesResult + .get(successfulRowsTag) + .apply( + "StorageApiWriteInconsistent", + new StorageApiWriteRecordsInconsistent<>( + dynamicDestinations, + bqServices, + failedRowsTag, + BigQueryStorageApiInsertErrorCoder.of())); + + PCollection insertErrors = + PCollectionList.of(convertMessagesResult.get(failedRowsTag)) + .and(writeRecordsResult.get(failedRowsTag)) + .apply("flattenErrors", Flatten.pCollections()); return WriteResult.in( - input.getPipeline(), - null, - null, - null, - null, - null, - failedRowsTag, - convertedRecords.get(failedRowsTag)); + input.getPipeline(), null, null, null, null, null, failedRowsTag, insertErrors); } public WriteResult expandTriggered( @@ -139,7 +144,7 @@ public WriteResult expandTriggered( // Handle triggered, low-latency loads into BigQuery. 
PCollection> inputInGlobalWindow = input.apply("rewindowIntoGlobal", Window.into(new GlobalWindows())); - PCollectionTuple result = + PCollectionTuple convertMessagesResult = inputInGlobalWindow .apply( "CreateTables", @@ -159,7 +164,7 @@ public WriteResult expandTriggered( if (this.allowAutosharding) { groupedRecords = - result + convertMessagesResult .get(successfulRowsTag) .apply( "GroupIntoBatches", @@ -171,7 +176,7 @@ public WriteResult expandTriggered( } else { PCollection, StorageApiWritePayload>> shardedRecords = - createShardedKeyValuePairs(result) + createShardedKeyValuePairs(convertMessagesResult) .setCoder(KvCoder.of(ShardedKey.Coder.of(destinationCoder), payloadCoder)); groupedRecords = shardedRecords.apply( @@ -181,20 +186,25 @@ public WriteResult expandTriggered( (StorageApiWritePayload e) -> (long) e.getPayload().length) .withMaxBufferingDuration(triggeringFrequency)); } - groupedRecords.apply( - "StorageApiWriteSharded", - new StorageApiWritesShardedRecords<>( - dynamicDestinations, createDisposition, kmsKey, bqServices, destinationCoder)); + PCollectionTuple writeRecordsResult = + groupedRecords.apply( + "StorageApiWriteSharded", + new StorageApiWritesShardedRecords<>( + dynamicDestinations, + createDisposition, + kmsKey, + bqServices, + destinationCoder, + BigQueryStorageApiInsertErrorCoder.of(), + failedRowsTag)); + + PCollection insertErrors = + PCollectionList.of(convertMessagesResult.get(failedRowsTag)) + .and(writeRecordsResult.get(failedRowsTag)) + .apply("flattenErrors", Flatten.pCollections()); return WriteResult.in( - input.getPipeline(), - null, - null, - null, - null, - null, - failedRowsTag, - result.get(failedRowsTag)); + input.getPipeline(), null, null, null, null, null, failedRowsTag, insertErrors); } private PCollection, StorageApiWritePayload>> @@ -232,7 +242,7 @@ public WriteResult expandUntriggered( PCollection> inputInGlobalWindow = input.apply( "rewindowIntoGlobal", Window.>into(new GlobalWindows())); - PCollectionTuple convertedRecords = + PCollectionTuple convertMessagesResult = inputInGlobalWindow .apply( "CreateTables", @@ -247,20 +257,24 @@ public WriteResult expandUntriggered( successfulRowsTag, BigQueryStorageApiInsertErrorCoder.of(), successCoder)); - convertedRecords - .get(successfulRowsTag) - .apply( - "StorageApiWriteUnsharded", - new StorageApiWriteUnshardedRecords<>(dynamicDestinations, bqServices)); + + PCollectionTuple writeRecordsResult = + convertMessagesResult + .get(successfulRowsTag) + .apply( + "StorageApiWriteUnsharded", + new StorageApiWriteUnshardedRecords<>( + dynamicDestinations, + bqServices, + failedRowsTag, + BigQueryStorageApiInsertErrorCoder.of())); + + PCollection insertErrors = + PCollectionList.of(convertMessagesResult.get(failedRowsTag)) + .and(writeRecordsResult.get(failedRowsTag)) + .apply("flattenErrors", Flatten.pCollections()); return WriteResult.in( - input.getPipeline(), - null, - null, - null, - null, - null, - failedRowsTag, - convertedRecords.get(failedRowsTag)); + input.getPipeline(), null, null, null, null, null, failedRowsTag, insertErrors); } } diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteRecordsInconsistent.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteRecordsInconsistent.java index 35b3ddfd080a..190525925aec 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteRecordsInconsistent.java +++ 
b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteRecordsInconsistent.java @@ -17,12 +17,14 @@ */ package org.apache.beam.sdk.io.gcp.bigquery; -import org.apache.beam.sdk.coders.VoidCoder; -import org.apache.beam.sdk.transforms.Create; +import org.apache.beam.sdk.coders.Coder; import org.apache.beam.sdk.transforms.PTransform; import org.apache.beam.sdk.transforms.ParDo; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PCollection; +import org.apache.beam.sdk.values.PCollectionTuple; +import org.apache.beam.sdk.values.TupleTag; +import org.apache.beam.sdk.values.TupleTagList; /** * A transform to write sharded records to BigQuery using the Storage API. This transform uses the @@ -32,34 +34,46 @@ */ @SuppressWarnings("FutureReturnValueIgnored") public class StorageApiWriteRecordsInconsistent - extends PTransform>, PCollection> { + extends PTransform>, PCollectionTuple> { private final StorageApiDynamicDestinations dynamicDestinations; private final BigQueryServices bqServices; + private final TupleTag failedRowsTag; + private final TupleTag> finalizeTag = new TupleTag<>("finalizeTag"); + private final Coder failedRowsCoder; public StorageApiWriteRecordsInconsistent( StorageApiDynamicDestinations dynamicDestinations, - BigQueryServices bqServices) { + BigQueryServices bqServices, + TupleTag failedRowsTag, + Coder failedRowsCoder) { this.dynamicDestinations = dynamicDestinations; this.bqServices = bqServices; + this.failedRowsTag = failedRowsTag; + this.failedRowsCoder = failedRowsCoder; } @Override - public PCollection expand(PCollection> input) { + public PCollectionTuple expand(PCollection> input) { String operationName = input.getName() + "/" + getName(); BigQueryOptions bigQueryOptions = input.getPipeline().getOptions().as(BigQueryOptions.class); // Append records to the Storage API streams. 
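// The expansion below relies on Beam's multi-output ParDo: a main tag plus extra tags yield
// a PCollectionTuple indexed by tag. The same pattern in isolation (illustrative names, not
// part of this transform):
TupleTag<String> mainTag = new TupleTag<String>("main") {};
TupleTag<String> errorTag = new TupleTag<String>("errors") {};
PCollectionTuple outputs =
    lines.apply(ParDo.of(someDoFn).withOutputTags(mainTag, TupleTagList.of(errorTag)));
outputs.get(errorTag).setCoder(StringUtf8Coder.of());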
- input.apply( - "Write Records", - ParDo.of( - new StorageApiWriteUnshardedRecords.WriteRecordsDoFn<>( - operationName, - dynamicDestinations, - bqServices, - true, - bigQueryOptions.getStorageApiAppendThresholdBytes(), - bigQueryOptions.getStorageApiAppendThresholdRecordCount(), - bigQueryOptions.getNumStorageWriteApiStreamAppendClients())) - .withSideInputs(dynamicDestinations.getSideInputs())); - return input.getPipeline().apply("voids", Create.empty(VoidCoder.of())); + PCollectionTuple result = + input.apply( + "Write Records", + ParDo.of( + new StorageApiWriteUnshardedRecords.WriteRecordsDoFn<>( + operationName, + dynamicDestinations, + bqServices, + true, + bigQueryOptions.getStorageApiAppendThresholdBytes(), + bigQueryOptions.getStorageApiAppendThresholdRecordCount(), + bigQueryOptions.getNumStorageWriteApiStreamAppendClients(), + finalizeTag, + failedRowsTag)) + .withOutputTags(finalizeTag, TupleTagList.of(failedRowsTag)) + .withSideInputs(dynamicDestinations.getSideInputs())); + result.get(failedRowsTag).setCoder(failedRowsCoder); + return result; } } diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteUnshardedRecords.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteUnshardedRecords.java index 871fc73698af..0f86b8871f0e 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteUnshardedRecords.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteUnshardedRecords.java @@ -20,26 +20,31 @@ import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument; import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.services.bigquery.model.TableRow; import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.Exceptions; import com.google.cloud.bigquery.storage.v1.ProtoRows; import com.google.cloud.bigquery.storage.v1.WriteStream.Type; import com.google.protobuf.ByteString; import com.google.protobuf.DynamicMessage; +import com.google.protobuf.InvalidProtocolBufferException; import java.io.IOException; import java.time.Instant; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import java.util.stream.StreamSupport; +import org.apache.beam.sdk.coders.Coder; import org.apache.beam.sdk.coders.KvCoder; import org.apache.beam.sdk.coders.StringUtf8Coder; import org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.DatasetService; import org.apache.beam.sdk.io.gcp.bigquery.BigQueryServices.StreamAppendClient; -import org.apache.beam.sdk.io.gcp.bigquery.RetryManager.Operation.Context; import org.apache.beam.sdk.io.gcp.bigquery.RetryManager.RetryType; import org.apache.beam.sdk.io.gcp.bigquery.StorageApiDynamicDestinations.DescriptorWrapper; import org.apache.beam.sdk.io.gcp.bigquery.StorageApiDynamicDestinations.MessageConverter; @@ -51,14 +56,18 @@ import org.apache.beam.sdk.transforms.PTransform; import org.apache.beam.sdk.transforms.ParDo; import org.apache.beam.sdk.transforms.Reshuffle; -import org.apache.beam.sdk.transforms.windowing.BoundedWindow; import org.apache.beam.sdk.transforms.windowing.GlobalWindow; 
import org.apache.beam.sdk.util.Preconditions; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PCollection; +import org.apache.beam.sdk.values.PCollectionTuple; +import org.apache.beam.sdk.values.TupleTag; +import org.apache.beam.sdk.values.TupleTagList; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.Cache; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.CacheBuilder; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.RemovalNotification; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Lists; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Maps; import org.checkerframework.checker.nullness.qual.NonNull; @@ -75,11 +84,14 @@ */ @SuppressWarnings({"FutureReturnValueIgnored"}) public class StorageApiWriteUnshardedRecords - extends PTransform>, PCollection> { + extends PTransform>, PCollectionTuple> { private static final Logger LOG = LoggerFactory.getLogger(StorageApiWriteUnshardedRecords.class); private final StorageApiDynamicDestinations dynamicDestinations; private final BigQueryServices bqServices; + private final TupleTag failedRowsTag; + private final TupleTag> finalizeTag = new TupleTag<>("finalizeTag"); + private final Coder failedRowsCoder; private static final ExecutorService closeWriterExecutor = Executors.newCachedThreadPool(); /** @@ -87,6 +99,8 @@ public class StorageApiWriteUnshardedRecords * StreamAppendClient after looking up the cache, and we must ensure that the cache is not * accessed in between the lookup and the pin (any access of the cache could trigger element * expiration). Therefore most used of APPEND_CLIENTS should synchronize. + * + *
TODO(reuvenlax); Once all uses of StreamWriter are using */ private static final Cache APPEND_CLIENTS = CacheBuilder.newBuilder() @@ -122,20 +136,24 @@ private static void runAsyncIgnoreFailure(ExecutorService executor, ThrowingRunn public StorageApiWriteUnshardedRecords( StorageApiDynamicDestinations dynamicDestinations, - BigQueryServices bqServices) { + BigQueryServices bqServices, + TupleTag failedRowsTag, + Coder failedRowsCoder) { this.dynamicDestinations = dynamicDestinations; this.bqServices = bqServices; + this.failedRowsTag = failedRowsTag; + this.failedRowsCoder = failedRowsCoder; } @Override - public PCollection expand(PCollection> input) { + public PCollectionTuple expand(PCollection> input) { String operationName = input.getName() + "/" + getName(); BigQueryOptions options = input.getPipeline().getOptions().as(BigQueryOptions.class); org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument( !options.getUseStorageApiConnectionPool(), "useStorageApiConnectionPool only supported " + "when using STORAGE_API_AT_LEAST_ONCE"); - return input - .apply( + PCollectionTuple writeResults = + input.apply( "Write Records", ParDo.of( new WriteRecordsDoFn<>( @@ -145,19 +163,39 @@ public PCollection expand(PCollection extends DoFn, KV> { private final Counter forcedFlushes = Metrics.counter(WriteRecordsDoFn.class, "forcedFlushes"); + private final TupleTag> finalizeTag; + private final TupleTag failedRowsTag; + + static class AppendRowsContext extends RetryManager.Operation.Context { + long offset; + ProtoRows protoRows; + + public AppendRowsContext(long offset, ProtoRows protoRows) { + this.offset = offset; + this.protoRows = protoRows; + } + } class DestinationState { private final String tableUrn; @@ -175,11 +213,17 @@ class DestinationState { Metrics.counter(WriteRecordsDoFn.class, "schemaMismatches"); private final Distribution inflightWaitSecondsDistribution = Metrics.distribution(WriteRecordsDoFn.class, "streamWriterWaitSeconds"); + private final Counter rowsSentToFailedRowsCollection = + Metrics.counter( + StorageApiWritesShardedRecords.WriteRecordsDoFn.class, + "rowsSentToFailedRowsCollection"); + private final boolean useDefaultStream; private DescriptorWrapper descriptorWrapper; private Instant nextCacheTickle = Instant.MAX; private final int clientNumber; private final boolean usingMultiplexing; + private final long maxRequestSize; public DestinationState( String tableUrn, @@ -187,7 +231,8 @@ public DestinationState( DatasetService datasetService, boolean useDefaultStream, int streamAppendClientCount, - BigQueryOptions bigQueryOptions) { + boolean usingMultiplexing, + long maxRequestSize) { this.tableUrn = tableUrn; this.messageConverter = messageConverter; this.pendingMessages = Lists.newArrayList(); @@ -195,7 +240,8 @@ public DestinationState( this.useDefaultStream = useDefaultStream; this.descriptorWrapper = messageConverter.getSchemaDescriptor(); this.clientNumber = new Random().nextInt(streamAppendClientCount); - this.usingMultiplexing = bigQueryOptions.getUseStorageApiConnectionPool(); + this.usingMultiplexing = usingMultiplexing; + this.maxRequestSize = maxRequestSize; } void teardown() { @@ -217,7 +263,7 @@ String getStreamAppendClientCacheEntryKey() { return this.streamName; } - String createStreamIfNeeded() { + String getOrCreateStreamName() { try { if (!useDefaultStream) { this.streamName = @@ -242,7 +288,7 @@ StreamAppendClient generateClient() throws Exception { StreamAppendClient getStreamAppendClient(boolean lookupCache) { try { if 
(this.streamAppendClient == null) { - createStreamIfNeeded(); + getOrCreateStreamName(); final StreamAppendClient newStreamAppendClient; synchronized (APPEND_CLIENTS) { if (lookupCache) { @@ -313,7 +359,8 @@ void addMessage(StorageApiWritePayload payload) throws Exception { invalidateWriteStream(); if (useDefaultStream) { // Since the default stream client is shared across many bundles and threads, we can't - // simply look it upfrom the cache, as another thread may have recreated it with the old + // simply look it up from the cache, as another thread may have recreated it with the + // old // schema. getStreamAppendClient(false); } @@ -328,29 +375,62 @@ void addMessage(StorageApiWritePayload payload) throws Exception { pendingMessages.add(ByteString.copyFrom(payload.getPayload())); } - void flush(RetryManager> retryManager) + long flush( + RetryManager retryManager, + OutputReceiver failedRowsReceiver) throws Exception { if (pendingMessages.isEmpty()) { - return; + return 0; } - final ProtoRows.Builder inserts = ProtoRows.newBuilder(); - inserts.addAllSerializedRows(pendingMessages); - ProtoRows protoRows = inserts.build(); + final ProtoRows.Builder insertsBuilder = ProtoRows.newBuilder(); + insertsBuilder.addAllSerializedRows(pendingMessages); + final ProtoRows inserts = insertsBuilder.build(); pendingMessages.clear(); + // Handle the case where the request is too large. + if (inserts.getSerializedSize() >= maxRequestSize) { + if (inserts.getSerializedRowsCount() > 1) { + // TODO(reuvenlax): Is it worth trying to handle this case by splitting the protoRows? + // Given that we split + // the ProtoRows iterable at 2MB and the max request size is 10MB, this scenario seems + // nearly impossible. + LOG.error( + "A request containing more than one row is over the request size limit of " + + maxRequestSize + + ". This is unexpected. All rows in the request will be sent to the failed-rows PCollection."); + } + for (ByteString rowBytes : inserts.getSerializedRowsList()) { + TableRow failedRow = + TableRowToStorageApiProto.tableRowFromMessage( + DynamicMessage.parseFrom(descriptorWrapper.descriptor, rowBytes)); + failedRowsReceiver.output( + new BigQueryStorageApiInsertError( + failedRow, "Row payload too large. Maximum size " + maxRequestSize)); + } + return 0; + } + + long offset = -1; + if (!this.useDefaultStream) { + offset = this.currentOffset; + this.currentOffset += inserts.getSerializedRowsCount(); + } + AppendRowsContext appendRowsContext = new AppendRowsContext(offset, inserts); + retryManager.addOperation( c -> { + if (c.protoRows.getSerializedRowsCount() == 0) { + // This might happen if all rows in a batch failed and were sent to the failed-rows + // PCollection. 
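// (The empty-batch short-circuit below pairs with the size guard above; both thresholds are
// ordinary pipeline options. A tuning sketch with illustrative values, assuming the setter
// names that PipelineOptions derives from the getters used in this file:)
BigQueryOptions opts = PipelineOptionsFactory.fromArgs(args).as(BigQueryOptions.class);
opts.setStorageApiAppendThresholdBytes(2 * 1024 * 1024);
opts.setStorageWriteApiMaxRequestSize(8 * 1000 * 1000L);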
+ return ApiFutures.immediateFuture(AppendRowsResponse.newBuilder().build()); + } try { StreamAppendClient writeStream = getStreamAppendClient(true); - long offset = -1; - if (!this.useDefaultStream) { - offset = this.currentOffset; - this.currentOffset += inserts.getSerializedRowsCount(); - } - ApiFuture response = writeStream.appendRows(offset, protoRows); + ApiFuture response = + writeStream.appendRows(c.offset, c.protoRows); + inflightWaitSecondsDistribution.update(writeStream.getInflightWaitSeconds()); if (!usingMultiplexing) { - inflightWaitSecondsDistribution.update(writeStream.getInflightWaitSeconds()); if (writeStream.getInflightWaitSeconds() > 5) { LOG.warn( "Storage Api write delay more than {} seconds.", @@ -363,33 +443,78 @@ void flush(RetryManager> retryMa } }, contexts -> { + AppendRowsContext failedContext = + Preconditions.checkStateNotNull(Iterables.getFirst(contexts, null)); + if (failedContext.getError() != null + && failedContext.getError() instanceof Exceptions.AppendSerializtionError) { + Exceptions.AppendSerializtionError error = + Preconditions.checkStateNotNull( + (Exceptions.AppendSerializtionError) failedContext.getError()); + Set failedRowIndices = error.getRowIndexToErrorMessage().keySet(); + for (int failedIndex : failedRowIndices) { + // Convert the message to a TableRow and send it to the failedRows collection. + ByteString protoBytes = failedContext.protoRows.getSerializedRows(failedIndex); + try { + TableRow failedRow = + TableRowToStorageApiProto.tableRowFromMessage( + DynamicMessage.parseFrom(descriptorWrapper.descriptor, protoBytes)); + new BigQueryStorageApiInsertError( + failedRow, error.getRowIndexToErrorMessage().get(failedIndex)); + failedRowsReceiver.output( + new BigQueryStorageApiInsertError( + failedRow, error.getRowIndexToErrorMessage().get(failedIndex))); + } catch (InvalidProtocolBufferException e) { + LOG.error("Failed to insert row and could not parse the result!"); + } + } + rowsSentToFailedRowsCollection.inc(failedRowIndices.size()); + + // Remove the failed row from the payload, so we retry the batch without the failed + // rows. + ProtoRows.Builder retryRows = ProtoRows.newBuilder(); + for (int i = 0; i < failedContext.protoRows.getSerializedRowsCount(); ++i) { + if (!failedRowIndices.contains(i)) { + ByteString rowBytes = failedContext.protoRows.getSerializedRows(i); + retryRows.addSerializedRows(rowBytes); + } + } + failedContext.protoRows = retryRows.build(); + + // Since we removed rows, we need to update the insert offsets for all remaining + // rows. + long newOffset = failedContext.offset; + for (AppendRowsContext context : contexts) { + context.offset = newOffset; + newOffset += context.protoRows.getSerializedRowsCount(); + } + this.currentOffset = newOffset; + return RetryType.RETRY_ALL_OPERATIONS; + } + LOG.warn( "Append to stream {} by client #{} failed with error, operations will be retried. 
Details: {}", streamName, clientNumber, - retrieveErrorDetails(contexts)); + retrieveErrorDetails(failedContext)); invalidateWriteStream(); appendFailures.inc(); return RetryType.RETRY_ALL_OPERATIONS; }, - response -> { - recordsAppended.inc(protoRows.getSerializedRowsCount()); + c -> { + recordsAppended.inc(c.protoRows.getSerializedRowsCount()); }, - new Context<>()); + appendRowsContext); maybeTickleCache(); + return inserts.getSerializedRowsCount(); } - String retrieveErrorDetails(Iterable> contexts) { - return StreamSupport.stream(contexts.spliterator(), false) - .<@Nullable Throwable>map(ctx -> ctx.getError()) - .map( - err -> - (err == null) - ? "no error" - : Lists.newArrayList(err.getStackTrace()).stream() - .map(se -> se.toString()) - .collect(Collectors.joining("\n"))) - .collect(Collectors.joining(",")); + String retrieveErrorDetails(AppendRowsContext failedContext) { + return (failedContext.getError() != null) + ? Arrays.stream( + Preconditions.checkStateNotNull(failedContext.getError()).getStackTrace()) + .map(StackTraceElement::toString) + .collect(Collectors.joining("\n")) + : "no execption"; } } @@ -412,7 +537,9 @@ String retrieveErrorDetails(Iterable> contexts) { boolean useDefaultStream, int flushThresholdBytes, int flushThresholdCount, - int streamAppendClientCount) { + int streamAppendClientCount, + TupleTag> finalizeTag, + TupleTag failedRowsTag) { this.messageConverters = new TwoLevelMessageConverterCache<>(operationName); this.dynamicDestinations = dynamicDestinations; this.bqServices = bqServices; @@ -420,31 +547,47 @@ String retrieveErrorDetails(Iterable> contexts) { this.flushThresholdBytes = flushThresholdBytes; this.flushThresholdCount = flushThresholdCount; this.streamAppendClientCount = streamAppendClientCount; + this.finalizeTag = finalizeTag; + this.failedRowsTag = failedRowsTag; } boolean shouldFlush() { return numPendingRecords > flushThresholdCount || numPendingRecordBytes > flushThresholdBytes; } - void flushIfNecessary() throws Exception { + void flushIfNecessary(OutputReceiver failedRowsReceiver) + throws Exception { if (shouldFlush()) { forcedFlushes.inc(); // Too much memory being used. Flush the state and wait for it to drain out. // TODO(reuvenlax): Consider waiting for memory usage to drop instead of waiting for all the // appends to finish. - flushAll(); + flushAll(failedRowsReceiver); } } - void flushAll() throws Exception { - RetryManager> - retryManager = - new RetryManager<>(Duration.standardSeconds(1), Duration.standardSeconds(10), 1000); - Preconditions.checkStateNotNull(destinations); - for (DestinationState destinationState : destinations.values()) { - destinationState.flush(retryManager); + void flushAll(OutputReceiver failedRowsReceiver) + throws Exception { + List> retryManagers = + Lists.newArrayListWithCapacity(Preconditions.checkStateNotNull(destinations).size()); + long numRowsWritten = 0; + for (DestinationState destinationState : + Preconditions.checkStateNotNull(destinations).values()) { + RetryManager retryManager = + new RetryManager<>(Duration.standardSeconds(1), Duration.standardSeconds(10), 1000); + retryManagers.add(retryManager); + numRowsWritten += destinationState.flush(retryManager, failedRowsReceiver); + retryManager.run(false); + } + if (numRowsWritten > 0) { + // TODO(reuvenlax): Can we await in parallel instead? Failure retries aren't triggered until + // await is called, so + // this approach means that if one call fais, it has to wait for all prior calls to complete + // before a retry happens. 
+ for (RetryManager retryManager : retryManagers) { + retryManager.await(); + } } - retryManager.run(true); numPendingRecords = 0; numPendingRecordBytes = 0; } @@ -488,14 +631,16 @@ DestinationState createDestinationState( datasetService, useDefaultStream, streamAppendClientCount, - bigQueryOptions); + bigQueryOptions.getUseStorageApiConnectionPool(), + bigQueryOptions.getStorageWriteApiMaxRequestSize()); } @ProcessElement public void process( ProcessContext c, PipelineOptions pipelineOptions, - @Element KV element) + @Element KV element, + MultiOutputReceiver o) throws Exception { DatasetService initializedDatasetService = initializeDatasetService(pipelineOptions); dynamicDestinations.setSideInputAccessorFromProcessContext(c); @@ -506,7 +651,7 @@ public void process( k -> createDestinationState( c, k, initializedDatasetService, pipelineOptions.as(BigQueryOptions.class))); - flushIfNecessary(); + flushIfNecessary(o.get(failedRowsTag)); state.addMessage(element.getValue()); ++numPendingRecords; numPendingRecordBytes += element.getValue().getPayload().length; @@ -514,14 +659,28 @@ public void process( @FinishBundle public void finishBundle(FinishBundleContext context) throws Exception { - flushAll(); + flushAll( + new OutputReceiver() { + @Override + public void output(BigQueryStorageApiInsertError output) { + outputWithTimestamp(output, GlobalWindow.INSTANCE.maxTimestamp()); + } + + @Override + public void outputWithTimestamp( + BigQueryStorageApiInsertError output, org.joda.time.Instant timestamp) { + context.output(failedRowsTag, output, timestamp, GlobalWindow.INSTANCE); + } + }); + final Map destinations = Preconditions.checkStateNotNull(this.destinations); for (DestinationState state : destinations.values()) { - if (!useDefaultStream) { + if (!useDefaultStream && !Strings.isNullOrEmpty(state.streamName)) { context.output( + finalizeTag, KV.of(state.tableUrn, state.streamName), - BoundedWindow.TIMESTAMP_MAX_VALUE.minus(Duration.millis(1)), + GlobalWindow.INSTANCE.maxTimestamp(), GlobalWindow.INSTANCE); } state.teardown(); diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java index c8bb805b6e8f..af0ae5169bc9 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java @@ -20,16 +20,23 @@ import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument; import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.services.bigquery.model.TableRow; import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.Exceptions; import com.google.cloud.bigquery.storage.v1.Exceptions.StreamFinalizedException; import com.google.cloud.bigquery.storage.v1.ProtoRows; import com.google.cloud.bigquery.storage.v1.WriteStream.Type; +import com.google.protobuf.ByteString; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.InvalidProtocolBufferException; import io.grpc.Status; import io.grpc.Status.Code; import java.io.IOException; import java.time.Instant; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutorService; 
import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -74,6 +81,9 @@ import org.apache.beam.sdk.util.ShardedKey; import org.apache.beam.sdk.values.KV; import org.apache.beam.sdk.values.PCollection; +import org.apache.beam.sdk.values.PCollectionTuple; +import org.apache.beam.sdk.values.TupleTag; +import org.apache.beam.sdk.values.TupleTagList; import org.apache.beam.sdk.values.TypeDescriptor; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.MoreObjects; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Strings; @@ -99,7 +109,7 @@ public class StorageApiWritesShardedRecords extends PTransform< PCollection, Iterable>>, - PCollection> { + PCollectionTuple> { private static final Logger LOG = LoggerFactory.getLogger(StorageApiWritesShardedRecords.class); private static final Duration DEFAULT_STREAM_IDLE_TIME = Duration.standardHours(1); @@ -108,7 +118,10 @@ public class StorageApiWritesShardedRecords destinationCoder; + private final Coder failedRowsCoder; private final Duration streamIdleTime = DEFAULT_STREAM_IDLE_TIME; + private final TupleTag failedRowsTag; + private final TupleTag> flushTag = new TupleTag<>("flushTag"); private static final ExecutorService closeWriterExecutor = Executors.newCachedThreadPool(); private static final Cache APPEND_CLIENTS = @@ -147,24 +160,29 @@ public StorageApiWritesShardedRecords( CreateDisposition createDisposition, String kmsKey, BigQueryServices bqServices, - Coder destinationCoder) { + Coder destinationCoder, + Coder failedRowsCoder, + TupleTag failedRowsTag) { this.dynamicDestinations = dynamicDestinations; this.createDisposition = createDisposition; this.kmsKey = kmsKey; this.bqServices = bqServices; this.destinationCoder = destinationCoder; + this.failedRowsCoder = failedRowsCoder; + this.failedRowsTag = failedRowsTag; } @Override - public PCollection expand( + public PCollectionTuple expand( PCollection, Iterable>> input) { String operationName = input.getName() + "/" + getName(); // Append records to the Storage API streams. - PCollection> written = + PCollectionTuple writeRecordsResult = input.apply( "Write Records", ParDo.of(new WriteRecordsDoFn(operationName, streamIdleTime)) - .withSideInputs(dynamicDestinations.getSideInputs())); + .withSideInputs(dynamicDestinations.getSideInputs()) + .withOutputTags(flushTag, TupleTagList.of(failedRowsTag))); SchemaCoder operationCoder; try { @@ -180,7 +198,8 @@ public PCollection expand( } // Send all successful writes to be flushed. 
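// Each successful append below emits (streamName, Operation(offset, finalize)), and the
// per-key Max combine collapses them to the furthest flush position per stream. Worked
// example with illustrative values, assuming Operation's natural order is by offset:
// ("s1", Op(42, false)) and ("s1", Op(99, false)) combine to ("s1", Op(99, false)), so the
// flush-and-finalize DoFn only ever sees the highest offset that is safe to flush.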
- return written + writeRecordsResult + .get(flushTag) .setCoder(KvCoder.of(StringUtf8Coder.of(), operationCoder)) .apply( Window.>configure() @@ -192,6 +211,8 @@ public PCollection expand( .apply("maxFlushPosition", Combine.perKey(Max.naturalOrder(new Operation(-1, false)))) .apply( "Flush and finalize writes", ParDo.of(new StorageApiFlushAndFinalizeDoFn(bqServices))); + writeRecordsResult.get(failedRowsTag).setCoder(failedRowsCoder); + return writeRecordsResult; } class WriteRecordsDoFn @@ -215,6 +236,8 @@ class WriteRecordsDoFn Metrics.distribution(WriteRecordsDoFn.class, "appendSizeDistribution"); private final Distribution appendSplitDistribution = Metrics.distribution(WriteRecordsDoFn.class, "appendSplitDistribution"); + private final Counter rowsSentToFailedRowsCollection = + Metrics.counter(WriteRecordsDoFn.class, "rowsSentToFailedRowsCollection"); private TwoLevelMessageConverterCache messageConverters; @@ -297,8 +320,10 @@ public void process( final @AlwaysFetched @StateId("streamName") ValueState streamName, final @AlwaysFetched @StateId("streamOffset") ValueState streamOffset, @TimerId("idleTimer") Timer idleTimer, - final OutputReceiver> o) + final MultiOutputReceiver o) throws Exception { + BigQueryOptions bigQueryOptions = pipelineOptions.as(BigQueryOptions.class); + dynamicDestinations.setSideInputAccessorFromProcessContext(c); TableDestination tableDestination = destinations.computeIfAbsent( @@ -323,7 +348,7 @@ public void process( // Each ProtoRows object contains at most 1MB of rows. // TODO: Push messageFromTableRow up to top level. That we we cans skip TableRow entirely if // already proto or already schema. - final long oneMb = 1024 * 1024; + final long splitSize = bigQueryOptions.getStorageApiAppendThresholdBytes(); // Called if the schema does not match. 
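// The SplittingIterable constructed just below batches serialized rows greedily by byte
// size. A standalone sketch of that idea (hypothetical helper, not the actual class):
static List<ProtoRows> batchBySize(Iterable<ByteString> rows, long limitBytes) {
  List<ProtoRows> batches = new ArrayList<>();
  ProtoRows.Builder current = ProtoRows.newBuilder();
  long currentBytes = 0;
  for (ByteString row : rows) {
    // Close the current batch if adding this row would exceed the limit.
    if (currentBytes > 0 && currentBytes + row.size() > limitBytes) {
      batches.add(current.build());
      current = ProtoRows.newBuilder();
      currentBytes = 0;
    }
    current.addSerializedRows(row);
    currentBytes += row.size();
  }
  if (currentBytes > 0) {
    batches.add(current.build());
  }
  return batches;
}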
Function updateSchemaHash = (Long expectedHash) -> { @@ -343,7 +368,7 @@ public void process( } }; Iterable messages = - new SplittingIterable(element.getValue(), oneMb, descriptor.get(), updateSchemaHash); + new SplittingIterable(element.getValue(), splitSize, descriptor.get(), updateSchemaHash); class AppendRowsContext extends RetryManager.Operation.Context { final ShardedKey key; @@ -352,9 +377,11 @@ class AppendRowsContext extends RetryManager.Operation.Context key) { + AppendRowsContext(ShardedKey key, ProtoRows protoRows) { this.key = key; + this.protoRows = protoRows; } @Override @@ -396,7 +423,7 @@ public String toString() { context.client = appendClient; context.offset = streamOffset.read(); ++context.tryIteration; - streamOffset.write(context.offset + context.numRows); + streamOffset.write(context.offset + context.protoRows.getSerializedRowsCount()); } } catch (Exception e) { throw new RuntimeException(e); @@ -415,114 +442,200 @@ public String toString() { } }; - Instant now = Instant.now(); - List contexts = Lists.newArrayList(); - RetryManager retryManager = - new RetryManager<>(Duration.standardSeconds(1), Duration.standardSeconds(10), 1000); - int numSplits = 0; - for (ProtoRows protoRows : messages) { - ++numSplits; - Function> run = - context -> { - try { - StreamAppendClient appendClient = - APPEND_CLIENTS.get( - context.streamName, - () -> - datasetService.getStreamAppendClient( - context.streamName, descriptor.get().descriptor, false)); - return appendClient.appendRows(context.offset, protoRows); - } catch (Exception e) { - throw new RuntimeException(e); + Function> runOperation = + context -> { + if (context.protoRows.getSerializedRowsCount() == 0) { + // This might happen if all rows in a batch failed and were sent to the failed-rows + // PCollection. + return ApiFutures.immediateFuture(AppendRowsResponse.newBuilder().build()); + } + try { + StreamAppendClient appendClient = + APPEND_CLIENTS.get( + context.streamName, + () -> + datasetService.getStreamAppendClient( + context.streamName, descriptor.get().descriptor, false)); + return appendClient.appendRows(context.offset, context.protoRows); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + + Function, RetryType> onError = + failedContexts -> { + // The first context is always the one that fails. + AppendRowsContext failedContext = + Preconditions.checkStateNotNull(Iterables.getFirst(failedContexts, null)); + + // AppendSerializationError means that BigQuery detected errors on individual rows, e.g. + // a row not conforming + // to bigQuery invariants. These errors are persistent, so we redirect those rows to the + // failedInserts + // PCollection, and retry with the remaining rows. + if (failedContext.getError() != null + && failedContext.getError() instanceof Exceptions.AppendSerializtionError) { + Exceptions.AppendSerializtionError error = + Preconditions.checkArgumentNotNull( + (Exceptions.AppendSerializtionError) failedContext.getError()); + Set failedRowIndices = error.getRowIndexToErrorMessage().keySet(); + for (int failedIndex : failedRowIndices) { + // Convert the message to a TableRow and send it to the failedRows collection. 
+ ByteString protoBytes = failedContext.protoRows.getSerializedRows(failedIndex); + try { + TableRow failedRow = + TableRowToStorageApiProto.tableRowFromMessage( + DynamicMessage.parseFrom(descriptor.get().descriptor, protoBytes)); + new BigQueryStorageApiInsertError( + failedRow, error.getRowIndexToErrorMessage().get(failedIndex)); + o.get(failedRowsTag) + .output( + new BigQueryStorageApiInsertError( + failedRow, error.getRowIndexToErrorMessage().get(failedIndex))); + } catch (InvalidProtocolBufferException e) { + LOG.error("Failed to insert row and could not parse the result!"); + } } - }; - - // RetryManager - Function, RetryType> onError = - failedContexts -> { - // The first context is always the one that fails. - AppendRowsContext failedContext = - Preconditions.checkStateNotNull(Iterables.getFirst(failedContexts, null)); - // Invalidate the StreamWriter and force a new one to be created. - LOG.error( - "Got error " + failedContext.getError() + " closing " + failedContext.streamName); - clearClients.accept(contexts); - appendFailures.inc(); - - boolean explicitStreamFinalized = - failedContext.getError() instanceof StreamFinalizedException; - Throwable error = Preconditions.checkStateNotNull(failedContext.getError()); - Status.Code statusCode = Status.fromThrowable(error).getCode(); - // This means that the offset we have stored does not match the current end of - // the stream in the Storage API. Usually this happens because a crash or a bundle - // failure - // happened after an append but before the worker could checkpoint it's - // state. The records that were appended in a failed bundle will be retried, - // meaning that the unflushed tail of the stream must be discarded to prevent - // duplicates. - boolean offsetMismatch = - statusCode.equals(Code.OUT_OF_RANGE) || statusCode.equals(Code.ALREADY_EXISTS); - // This implies that the stream doesn't exist or has already been finalized. In this - // case we have no choice but to create a new stream. - boolean streamDoesNotExist = - explicitStreamFinalized - || statusCode.equals(Code.INVALID_ARGUMENT) - || statusCode.equals(Code.NOT_FOUND) - || statusCode.equals(Code.FAILED_PRECONDITION); - if (offsetMismatch || streamDoesNotExist) { - appendOffsetFailures.inc(); - LOG.warn( - "Append to " - + failedContext - + " failed with " - + failedContext.getError() - + " Will retry with a new stream"); - // Finalize the stream and clear streamName so a new stream will be created. - o.output( - KV.of(failedContext.streamName, new Operation(failedContext.offset - 1, true))); - // Reinitialize all contexts with the new stream and new offsets. - initializeContexts.accept(failedContexts, true); - - // Offset failures imply that all subsequent parallel appends will also fail. - // Retry them all. - return RetryType.RETRY_ALL_OPERATIONS; + rowsSentToFailedRowsCollection.inc(failedRowIndices.size()); + + // Remove the failed row from the payload, so we retry the batch without the failed + // rows. + ProtoRows.Builder retryRows = ProtoRows.newBuilder(); + for (int i = 0; i < failedContext.protoRows.getSerializedRowsCount(); ++i) { + if (!failedRowIndices.contains(i)) { + ByteString rowBytes = failedContext.protoRows.getSerializedRows(i); + retryRows.addSerializedRows(rowBytes); + } } + failedContext.protoRows = retryRows.build(); + // Since we removed rows, we need to update the insert offsets for all remaining rows. 
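// Worked example of the offset rebasing below (illustrative numbers): three pending contexts
// of 4, 3 and 5 rows starting at offset 100 occupy [100,104), [104,107) and [107,112). If two
// rows are dropped from the first batch, the retry offsets become 100, 102 and 105, and the
// stream offset is rewritten to 110.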
+ long offset = failedContext.offset; + for (AppendRowsContext context : failedContexts) { + context.offset = offset; + offset += context.protoRows.getSerializedRowsCount(); + } + streamOffset.write(offset); return RetryType.RETRY_ALL_OPERATIONS; - }; + } - Consumer onSuccess = - context -> { - o.output( - KV.of( - context.streamName, - new Operation(context.offset + context.numRows - 1, false))); - flushesScheduled.inc(protoRows.getSerializedRowsCount()); - }; - - AppendRowsContext context = new AppendRowsContext(element.getKey()); - context.numRows = protoRows.getSerializedRowsCount(); - contexts.add(context); - retryManager.addOperation(run, onError, onSuccess, context); - recordsAppended.inc(protoRows.getSerializedRowsCount()); - appendSizeDistribution.update(context.numRows); - } - initializeContexts.accept(contexts, false); + // Invalidate the StreamWriter and force a new one to be created. + LOG.error( + "Got error " + failedContext.getError() + " closing " + failedContext.streamName); + clearClients.accept(failedContexts); + appendFailures.inc(); + + boolean explicitStreamFinalized = + failedContext.getError() instanceof StreamFinalizedException; + Throwable error = Preconditions.checkStateNotNull(failedContext.getError()); + Status.Code statusCode = Status.fromThrowable(error).getCode(); + // This means that the offset we have stored does not match the current end of + // the stream in the Storage API. Usually this happens because a crash or a bundle + // failure + // happened after an append but before the worker could checkpoint it's + // state. The records that were appended in a failed bundle will be retried, + // meaning that the unflushed tail of the stream must be discarded to prevent + // duplicates. + boolean offsetMismatch = + statusCode.equals(Code.OUT_OF_RANGE) || statusCode.equals(Code.ALREADY_EXISTS); + // This implies that the stream doesn't exist or has already been finalized. In this + // case we have no choice but to create a new stream. + boolean streamDoesNotExist = + explicitStreamFinalized + || statusCode.equals(Code.INVALID_ARGUMENT) + || statusCode.equals(Code.NOT_FOUND) + || statusCode.equals(Code.FAILED_PRECONDITION); + if (offsetMismatch || streamDoesNotExist) { + appendOffsetFailures.inc(); + LOG.warn( + "Append to " + + failedContext + + " failed with " + + failedContext.getError() + + " Will retry with a new stream"); + // Finalize the stream and clear streamName so a new stream will be created. + o.get(flushTag) + .output( + KV.of( + failedContext.streamName, new Operation(failedContext.offset - 1, true))); + // Reinitialize all contexts with the new stream and new offsets. + initializeContexts.accept(failedContexts, true); + + // Offset failures imply that all subsequent parallel appends will also fail. + // Retry them all. + return RetryType.RETRY_ALL_OPERATIONS; + } - try { - retryManager.run(true); - } finally { - // Make sure that all pins are removed. 
- for (AppendRowsContext context : contexts) {
- if (context.client != null) {
- runAsyncIgnoreFailure(closeWriterExecutor, context.client::unpin);
+ return RetryType.RETRY_ALL_OPERATIONS;
+ };
+
+ Consumer onSuccess =
+ context -> {
+ o.get(flushTag)
+ .output(
+ KV.of(
+ context.streamName,
+ new Operation(
+ context.offset + context.protoRows.getSerializedRowsCount() - 1,
+ false)));
+ flushesScheduled.inc(context.protoRows.getSerializedRowsCount());
+ };
+ long maxRequestSize = bigQueryOptions.getStorageWriteApiMaxRequestSize();
+ Instant now = Instant.now();
+ List contexts = Lists.newArrayList();
+ RetryManager retryManager =
+ new RetryManager<>(Duration.standardSeconds(1), Duration.standardSeconds(10), 1000);
+ int numAppends = 0;
+ for (ProtoRows protoRows : messages) {
+ // Handle the case of a row that is too large.
+ if (protoRows.getSerializedSize() >= maxRequestSize) {
+ if (protoRows.getSerializedRowsCount() > 1) {
+ // TODO(reuvenlax): Is it worth trying to handle this case by splitting the protoRows?
+ // Given that we split the ProtoRows iterable at 2MB and the max request size is 10MB,
+ // this scenario seems nearly impossible.
+ LOG.error(
+ "A request containing more than one row is over the request size limit of "
+ + maxRequestSize
+ + ". This is unexpected. All rows in the request will be sent to the failed-rows PCollection.");
+ }
+ for (ByteString rowBytes : protoRows.getSerializedRowsList()) {
+ TableRow failedRow =
+ TableRowToStorageApiProto.tableRowFromMessage(
+ DynamicMessage.parseFrom(descriptor.get().descriptor, rowBytes));
+ o.get(failedRowsTag)
+ .output(
+ new BigQueryStorageApiInsertError(
+ failedRow, "Row payload too large. Maximum size " + maxRequestSize));
}
+ } else {
+ ++numAppends;
+ AppendRowsContext context = new AppendRowsContext(element.getKey(), protoRows);
+ contexts.add(context);
+ retryManager.addOperation(runOperation, onError, onSuccess, context);
+ recordsAppended.inc(protoRows.getSerializedRowsCount());
+ appendSizeDistribution.update(context.protoRows.getSerializedRowsCount());
}
}
- appendSplitDistribution.update(numSplits);
- java.time.Duration timeElapsed = java.time.Duration.between(now, Instant.now());
- appendLatencyDistribution.update(timeElapsed.toMillis());
+ if (numAppends > 0) {
+ initializeContexts.accept(contexts, false);
+ try {
+ retryManager.run(true);
+ } finally {
+ // Make sure that all pins are removed.
+ for (AppendRowsContext context : contexts) {
+ if (context.client != null) {
+ runAsyncIgnoreFailure(closeWriterExecutor, context.client::unpin);
+ }
+ }
+ }
+ appendSplitDistribution.update(numAppends);
+
+ java.time.Duration timeElapsed = java.time.Duration.between(now, Instant.now());
+ appendLatencyDistribution.update(timeElapsed.toMillis());
+ }
idleTimer.offset(streamIdleTime).withNoOutputTimestamp().setRelative();
}
@@ -530,15 +643,16 @@ public String toString() {
private void finalizeStream(
@AlwaysFetched @StateId("streamName") ValueState streamName,
@AlwaysFetched @StateId("streamOffset") ValueState streamOffset,
- OutputReceiver> o,
+ MultiOutputReceiver o,
org.joda.time.Instant finalizeElementTs) {
String stream = MoreObjects.firstNonNull(streamName.read(), "");
if (!Strings.isNullOrEmpty(stream)) {
// Finalize the stream
long nextOffset = MoreObjects.firstNonNull(streamOffset.read(), 0L);
- o.outputWithTimestamp(
- KV.of(stream, new Operation(nextOffset - 1, true)), finalizeElementTs);
+ o.get(flushTag)
+ .outputWithTimestamp(
+ KV.of(stream, new Operation(nextOffset - 1, true)), finalizeElementTs);
streamName.clear();
streamOffset.clear();
// Make sure that the stream object is closed.
@@ -550,7 +664,7 @@ private void finalizeStream(
public void onTimer(
@AlwaysFetched @StateId("streamName") ValueState streamName,
@AlwaysFetched @StateId("streamOffset") ValueState streamOffset,
- OutputReceiver> o,
+ MultiOutputReceiver o,
BoundedWindow window) {
// Stream is idle - clear it.
// Note: this is best effort. We are explicitly emitting a timestamp that is before
@@ -566,7 +680,7 @@ public void onTimer(
public void onWindowExpiration(
@AlwaysFetched @StateId("streamName") ValueState streamName,
@AlwaysFetched @StateId("streamOffset") ValueState streamOffset,
- OutputReceiver> o,
+ MultiOutputReceiver o,
BoundedWindow window) {
// Window is done - usually because the pipeline has been drained. Make sure to clean up
// streams so that they are not leaked.
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/BigqueryClient.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/BigqueryClient.java
index 6224729aa91a..f5752797acd6 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/BigqueryClient.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/BigqueryClient.java
@@ -288,7 +288,8 @@ private QueryResponse getTypedTableRows(QueryResponse response) {
/** Performs a query without flattening results.
*/ @Nonnull - public List queryUnflattened(String query, String projectId, boolean typed) + public List queryUnflattened( + String query, String projectId, boolean typed, boolean useStandardSql) throws IOException, InterruptedException { Random rnd = new Random(System.currentTimeMillis()); String temporaryDatasetId = "_dataflow_temporary_dataset_" + rnd.nextInt(1000000); @@ -308,6 +309,7 @@ public List queryUnflattened(String query, String projectId, boolean t .setFlattenResults(false) .setAllowLargeResults(true) .setDestinationTable(tempTableReference) + .setUseLegacySql(!useStandardSql) .setQuery(query); JobConfiguration jc = new JobConfiguration().setQuery(jcQuery); diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/FakeDatasetService.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/FakeDatasetService.java index 44f73bd56cb2..948c75cb756d 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/FakeDatasetService.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/testing/FakeDatasetService.java @@ -32,6 +32,7 @@ import com.google.api.services.bigquery.model.TableSchema; import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1.Exceptions; import com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse; import com.google.cloud.bigquery.storage.v1.FlushRowsResponse; import com.google.cloud.bigquery.storage.v1.ProtoRows; @@ -43,6 +44,7 @@ import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.DynamicMessage; import com.google.protobuf.Timestamp; +import com.google.rpc.Code; import java.io.IOException; import java.io.Serializable; import java.util.HashMap; @@ -50,6 +52,7 @@ import java.util.Map; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.regex.Pattern; import javax.annotation.Nullable; import org.apache.beam.sdk.annotations.Internal; @@ -148,6 +151,8 @@ void commit() { } } + Function shouldFailRow = + (Function & Serializable) tr -> false; Map> insertErrors = Maps.newHashMap(); // The counter for the number of insertions performed. 
@@ -162,6 +167,10 @@ public static void setUp() { } } + public void setShouldFailRow(Function shouldFailRow) { + this.shouldFailRow = shouldFailRow; + } + @Override public Table getTable(TableReference tableRef) throws InterruptedException, IOException { if (tableRef.getProjectId() == null) { @@ -504,6 +513,7 @@ public StreamAppendClient getStreamAppendClient( @Override public ApiFuture appendRows(long offset, ProtoRows rows) throws Exception { + AppendRowsResponse.Builder responseBuilder = AppendRowsResponse.newBuilder(); synchronized (FakeDatasetService.class) { Stream stream = writeStreams.get(streamName); if (stream == null) { @@ -511,18 +521,32 @@ public ApiFuture appendRows(long offset, ProtoRows rows) } List tableRows = Lists.newArrayListWithExpectedSize(rows.getSerializedRowsCount()); - for (ByteString bytes : rows.getSerializedRowsList()) { + Map rowIndexToErrorMessage = Maps.newHashMap(); + for (int i = 0; i < rows.getSerializedRowsCount(); ++i) { + ByteString bytes = rows.getSerializedRows(i); DynamicMessage msg = DynamicMessage.parseFrom(protoDescriptor, bytes); if (msg.getUnknownFields() != null && !msg.getUnknownFields().asMap().isEmpty()) { throw new RuntimeException("Unknown fields set in append! " + msg.getUnknownFields()); } - tableRows.add( + TableRow tableRow = TableRowToStorageApiProto.tableRowFromMessage( - DynamicMessage.parseFrom(protoDescriptor, bytes))); + DynamicMessage.parseFrom(protoDescriptor, bytes)); + if (shouldFailRow.apply(tableRow)) { + rowIndexToErrorMessage.put(i, "Failing row " + tableRow.toPrettyString()); + } + tableRows.add(tableRow); + } + if (!rowIndexToErrorMessage.isEmpty()) { + return ApiFutures.immediateFailedFuture( + new Exceptions.AppendSerializtionError( + Code.INVALID_ARGUMENT.getNumber(), + "Append serialization failed for writer: " + streamName, + stream.streamName, + rowIndexToErrorMessage)); } stream.appendRows(offset, tableRows); } - return ApiFutures.immediateFuture(AppendRowsResponse.newBuilder().build()); + return ApiFutures.immediateFuture(responseBuilder.build()); } @Override diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java index 7f529bfa3489..1e1749e8569a 100644 --- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIOWriteTest.java @@ -64,6 +64,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; +import java.util.function.Function; import java.util.function.LongFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -2583,11 +2584,15 @@ public void testStorageApiErrors() throws Exception { TableRow goodNested = new TableRow().set("number", "42"); TableRow badNested = new TableRow().set("number", "nAn"); + final String failValue = "failme"; List goodRows = ImmutableList.of( new TableRow().set("name", "n1").set("number", "1"), + new TableRow().set("name", failValue).set("number", "1"), new TableRow().set("name", "n2").set("number", "2"), - new TableRow().set("name", "parent1").set("nested", goodNested)); + new TableRow().set("name", failValue).set("number", "2"), + new TableRow().set("name", "parent1").set("nested", goodNested), + new TableRow().set("name", failValue).set("number", "1")); List 
badRows = ImmutableList.of( // Unknown field. @@ -2614,6 +2619,11 @@ public void testStorageApiErrors() throws Exception { // Invalid nested row new TableRow().set("name", "parent2").set("nested", badNested)); + Function shouldFailRow = + (Function & Serializable) + tr -> tr.containsKey("name") && tr.get("name").equals(failValue); + fakeDatasetService.setShouldFailRow(shouldFailRow); + WriteResult result = p.apply(Create.of(Iterables.concat(goodRows, badRows))) .apply( @@ -2632,12 +2642,17 @@ public void testStorageApiErrors() throws Exception { .apply( MapElements.into(TypeDescriptor.of(TableRow.class)) .via(BigQueryStorageApiInsertError::getRow)); - PAssert.that(deadRows).containsInAnyOrder(badRows); + + PAssert.that(deadRows) + .containsInAnyOrder( + Iterables.concat(badRows, Iterables.filter(goodRows, shouldFailRow::apply))); p.run(); assertThat( fakeDatasetService.getAllRows("project-id", "dataset-id", "table"), - containsInAnyOrder(Iterables.toArray(goodRows, TableRow.class))); + containsInAnyOrder( + Iterables.toArray( + Iterables.filter(goodRows, r -> !shouldFailRow.apply(r)), TableRow.class))); } @Test diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryNestedRecordsIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryNestedRecordsIT.java index 698ef660293c..b85dc62c5fe9 100644 --- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryNestedRecordsIT.java +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryNestedRecordsIT.java @@ -97,12 +97,13 @@ private static void runPipeline(Options options) throws Exception { TableRow queryUnflattened = bigQueryClient - .queryUnflattened(options.getInput(), bigQueryOptions.getProject(), true) + .queryUnflattened(options.getInput(), bigQueryOptions.getProject(), true, false) .get(0); TableRow queryUnflattenable = bigQueryClient - .queryUnflattened(options.getUnflattenableInput(), bigQueryOptions.getProject(), true) + .queryUnflattened( + options.getUnflattenableInput(), bigQueryOptions.getProject(), true, false) .get(0); // Verify that the results are the same. diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiSinkFailedRowsIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiSinkFailedRowsIT.java new file mode 100644 index 000000000000..465bebbf1389 --- /dev/null +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiSinkFailedRowsIT.java @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.beam.sdk.io.gcp.bigquery; + +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.api.services.bigquery.model.Table; +import com.google.api.services.bigquery.model.TableFieldSchema; +import com.google.api.services.bigquery.model.TableReference; +import com.google.api.services.bigquery.model.TableRow; +import com.google.api.services.bigquery.model.TableSchema; +import java.io.IOException; +import java.util.List; +import org.apache.beam.sdk.Pipeline; +import org.apache.beam.sdk.extensions.gcp.options.GcpOptions; +import org.apache.beam.sdk.io.gcp.testing.BigqueryClient; +import org.apache.beam.sdk.testing.PAssert; +import org.apache.beam.sdk.testing.TestPipeline; +import org.apache.beam.sdk.transforms.Create; +import org.apache.beam.sdk.transforms.MapElements; +import org.apache.beam.sdk.values.PCollection; +import org.apache.beam.sdk.values.TypeDescriptor; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables; +import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Lists; +import org.hamcrest.Matchers; +import org.joda.time.Duration; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Integration test for failed-rows handling when using the storage API. */ +@RunWith(Parameterized.class) +public class StorageApiSinkFailedRowsIT { + @Parameterized.Parameters + public static Iterable data() { + return ImmutableList.of( + new Object[] {true, false, false}, + new Object[] {false, true, false}, + new Object[] {false, false, true}, + new Object[] {true, false, true}); + } + + @Parameterized.Parameter(0) + public boolean useStreamingExactlyOnce; + + @Parameterized.Parameter(1) + public boolean useAtLeastOnce; + + @Parameterized.Parameter(2) + public boolean useBatch; + + private static final Logger LOG = LoggerFactory.getLogger(StorageApiSinkFailedRowsIT.class); + private static final BigqueryClient BQ_CLIENT = new BigqueryClient("StorageApiSinkFailedRowsIT"); + private static final String PROJECT = + TestPipeline.testingPipelineOptions().as(GcpOptions.class).getProject(); + private static final String BIG_QUERY_DATASET_ID = + "storage_api_sink_failed_rows" + System.nanoTime(); + + private static final List FIELDS = + ImmutableList.builder() + .add(new TableFieldSchema().setType("STRING").setName("str")) + .add(new TableFieldSchema().setType("INT64").setName("i64")) + .add(new TableFieldSchema().setType("DATE").setName("date")) + .add(new TableFieldSchema().setType("STRING").setMaxLength(1L).setName("strone")) + .add(new TableFieldSchema().setType("BYTES").setName("bytes")) + .add(new TableFieldSchema().setType("JSON").setName("json")) + .add( + new TableFieldSchema() + .setType("STRING") + .setMaxLength(1L) + .setMode("REPEATED") + .setName("stronearray")) + .build(); + + private static final TableSchema BASE_TABLE_SCHEMA = + new TableSchema() + .setFields( + ImmutableList.builder() + .addAll(FIELDS) + .add(new TableFieldSchema().setType("STRUCT").setFields(FIELDS).setName("inner")) + .build()); + + private static final byte[] BIG_BYTES = new byte[11 * 1024 * 1024]; + + private BigQueryIO.Write.Method getMethod() { + return useAtLeastOnce + ? 
BigQueryIO.Write.Method.STORAGE_API_AT_LEAST_ONCE + : BigQueryIO.Write.Method.STORAGE_WRITE_API; + } + + @BeforeClass + public static void setUpTestEnvironment() throws IOException, InterruptedException { + // Create one BQ dataset for all test cases. + BQ_CLIENT.createNewDataset(PROJECT, BIG_QUERY_DATASET_ID); + } + + @AfterClass + public static void cleanup() { + LOG.info("Start to clean up tables and datasets."); + BQ_CLIENT.deleteDataset(PROJECT, BIG_QUERY_DATASET_ID); + } + + @Test + public void testSchemaMismatchCaughtByBeam() throws IOException, InterruptedException { + String tableSpec = createTable(BASE_TABLE_SCHEMA); + TableRow good1 = new TableRow().set("str", "foo").set("i64", "42"); + TableRow good2 = new TableRow().set("str", "foo").set("i64", "43"); + Iterable goodRows = + ImmutableList.of( + good1.clone().set("inner", new TableRow()), + good2.clone().set("inner", new TableRow()), + new TableRow().set("inner", good1), + new TableRow().set("inner", good2)); + + TableRow bad1 = new TableRow().set("str", "foo").set("i64", "baad"); + TableRow bad2 = new TableRow().set("str", "foo").set("i64", "42").set("unknown", "foobar"); + Iterable badRows = + ImmutableList.of( + bad1, bad2, new TableRow().set("inner", bad1), new TableRow().set("inner", bad2)); + + runPipeline( + getMethod(), + useStreamingExactlyOnce, + tableSpec, + Iterables.concat(goodRows, badRows), + badRows); + assertGoodRowsWritten(tableSpec, goodRows); + } + + @Test + public void testInvalidRowCaughtByBigquery() throws IOException, InterruptedException { + String tableSpec = createTable(BASE_TABLE_SCHEMA); + + TableRow good1 = + new TableRow() + .set("str", "foo") + .set("i64", "42") + .set("date", "2022-08-16") + .set("stronearray", Lists.newArrayList()); + TableRow good2 = + new TableRow().set("str", "foo").set("i64", "43").set("stronearray", Lists.newArrayList()); + Iterable goodRows = + ImmutableList.of( + good1.clone().set("inner", new TableRow().set("stronearray", Lists.newArrayList())), + good2.clone().set("inner", new TableRow().set("stronearray", Lists.newArrayList())), + new TableRow().set("inner", good1).set("stronearray", Lists.newArrayList()), + new TableRow().set("inner", good2).set("stronearray", Lists.newArrayList())); + + TableRow bad1 = new TableRow().set("str", "foo").set("i64", "42").set("date", "10001-08-16"); + TableRow bad2 = new TableRow().set("str", "foo").set("i64", "42").set("strone", "ab"); + TableRow bad3 = new TableRow().set("str", "foo").set("i64", "42").set("json", "BAADF00D"); + TableRow bad4 = + new TableRow() + .set("str", "foo") + .set("i64", "42") + .set("stronearray", Lists.newArrayList("toolong")); + TableRow bad5 = new TableRow().set("bytes", BIG_BYTES); + Iterable badRows = + ImmutableList.of( + bad1, + bad2, + bad3, + bad4, + bad5, + new TableRow().set("inner", bad1), + new TableRow().set("inner", bad2), + new TableRow().set("inner", bad3)); + + runPipeline( + getMethod(), + useStreamingExactlyOnce, + tableSpec, + Iterables.concat(goodRows, badRows), + badRows); + assertGoodRowsWritten(tableSpec, goodRows); + } + + private static String createTable(TableSchema tableSchema) + throws IOException, InterruptedException { + String table = "table" + System.nanoTime(); + BQ_CLIENT.deleteTable(PROJECT, BIG_QUERY_DATASET_ID, table); + BQ_CLIENT.createNewTable( + PROJECT, + BIG_QUERY_DATASET_ID, + new Table() + .setSchema(tableSchema) + .setTableReference( + new TableReference() + .setTableId(table) + .setDatasetId(BIG_QUERY_DATASET_ID) + .setProjectId(PROJECT))); + return PROJECT + "." 
+ BIG_QUERY_DATASET_ID + "." + table; + } + + private void assertGoodRowsWritten(String tableSpec, Iterable goodRows) + throws IOException, InterruptedException { + TableRow queryResponse = + Iterables.getOnlyElement( + BQ_CLIENT.queryUnflattened( + String.format("SELECT COUNT(*) FROM %s", tableSpec), PROJECT, true, true)); + int numRowsWritten = Integer.parseInt((String) queryResponse.get("f0_")); + if (useAtLeastOnce) { + assertThat(numRowsWritten, Matchers.greaterThanOrEqualTo(Iterables.size(goodRows))); + } else { + assertThat(numRowsWritten, Matchers.equalTo(Iterables.size(goodRows))); + } + } + + private static void runPipeline( + BigQueryIO.Write.Method method, + boolean triggered, + String tableSpec, + Iterable tableRows, + Iterable expectedFailedRows) { + Pipeline p = Pipeline.create(); + + BigQueryIO.Write write = + BigQueryIO.writeTableRows() + .to(tableSpec) + .withSchema(BASE_TABLE_SCHEMA) + .withMethod(method) + .withCreateDisposition(BigQueryIO.Write.CreateDisposition.CREATE_NEVER); + if (method == BigQueryIO.Write.Method.STORAGE_WRITE_API) { + write = write.withNumStorageWriteApiStreams(1); + if (triggered) { + write = write.withTriggeringFrequency(Duration.standardSeconds(1)); + } + } + PCollection input = p.apply("Create test cases", Create.of(tableRows)); + if (triggered) { + input = input.setIsBoundedInternal(PCollection.IsBounded.UNBOUNDED); + } + WriteResult result = input.apply("Write using Storage Write API", write); + + PCollection failedRows = + result + .getFailedStorageApiInserts() + .apply( + MapElements.into(TypeDescriptor.of(TableRow.class)) + .via(BigQueryStorageApiInsertError::getRow)); + + PAssert.that(failedRows).containsInAnyOrder(expectedFailedRows); + + p.run().waitUntilFinish(); + } +} diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/TableRowToStorageApiProtoIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/TableRowToStorageApiProtoIT.java index b2d9e04ffe22..5f488da0210b 100644 --- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/TableRowToStorageApiProtoIT.java +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/bigquery/TableRowToStorageApiProtoIT.java @@ -337,7 +337,8 @@ public void testBaseTableRow() throws IOException, InterruptedException { runPipeline(tableSpec, Collections.singleton(BASE_TABLE_ROW)); List actualTableRows = - BQ_CLIENT.queryUnflattened(String.format("SELECT * FROM [%s]", tableSpec), PROJECT, true); + BQ_CLIENT.queryUnflattened( + String.format("SELECT * FROM %s", tableSpec), PROJECT, true, true); assertEquals(1, actualTableRows.size()); assertEquals(BASE_TABLE_ROW_EXPECTED, actualTableRows.get(0)); @@ -362,7 +363,8 @@ public void testNestedRichTypesAndNull() throws IOException, InterruptedExceptio runPipeline(tableSpec, Collections.singleton(tableRow)); List actualTableRows = - BQ_CLIENT.queryUnflattened(String.format("SELECT * FROM [%s]", tableSpec), PROJECT, true); + BQ_CLIENT.queryUnflattened( + String.format("SELECT * FROM %s", tableSpec), PROJECT, true, true); assertEquals(1, actualTableRows.size()); assertEquals(BASE_TABLE_ROW_EXPECTED, actualTableRows.get(0).get("nestedValue1")); @@ -391,7 +393,7 @@ private static String createTable(TableSchema tableSchema) .setTableId(table) .setDatasetId(BIG_QUERY_DATASET_ID) .setProjectId(PROJECT))); - return PROJECT + ":" + BIG_QUERY_DATASET_ID + "." + table; + return PROJECT + "." + BIG_QUERY_DATASET_ID + "." 
+ table; } private static void runPipeline(String tableSpec, Iterable tableRows) { From cae014141cb1b53461eb33041e38cd6dfb8a1f6d Mon Sep 17 00:00:00 2001 From: Yi Hu Date: Sat, 22 Oct 2022 15:59:50 -0400 Subject: [PATCH 029/115] Bump dataflow java fn container version to beam-master-20221022 --- runners/google-cloud-dataflow-java/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runners/google-cloud-dataflow-java/build.gradle b/runners/google-cloud-dataflow-java/build.gradle index 8429bb40816a..b8f292df9f9d 100644 --- a/runners/google-cloud-dataflow-java/build.gradle +++ b/runners/google-cloud-dataflow-java/build.gradle @@ -55,7 +55,7 @@ processResources { 'dataflow.legacy_environment_major_version' : '8', 'dataflow.fnapi_environment_major_version' : '8', 'dataflow.legacy_container_version' : 'beam-master-20220816', - 'dataflow.fnapi_container_version' : 'beam-master-20220923', + 'dataflow.fnapi_container_version' : 'beam-master-20221022', 'dataflow.container_base_repository' : 'gcr.io/cloud-dataflow/v1beta3', ] } From 96051f2cc6befe3b9e7526bf79dfa1a56bfc4b97 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Mon, 24 Oct 2022 08:59:41 +0200 Subject: [PATCH 030/115] Remove unnecessary dependencies from jpms test (#23775) --- sdks/java/testing/jpms-tests/build.gradle | 5 ----- 1 file changed, 5 deletions(-) diff --git a/sdks/java/testing/jpms-tests/build.gradle b/sdks/java/testing/jpms-tests/build.gradle index 9aa3f41b73a0..f781c29b8480 100644 --- a/sdks/java/testing/jpms-tests/build.gradle +++ b/sdks/java/testing/jpms-tests/build.gradle @@ -78,9 +78,6 @@ configurations { sparkRunnerIntegrationTest.extendsFrom(baseIntegrationTest) } -def spark_version = '3.1.1' -def spark_scala_version = '2.12' - dependencies { implementation project(path: ":sdks:java:core", configuration: "shadow") implementation project(path: ":sdks:java:extensions:google-cloud-platform-core") @@ -93,8 +90,6 @@ dependencies { flinkRunnerIntegrationTest project(":runners:flink:${project.ext.latestFlinkVersion}") dataflowRunnerIntegrationTest project(":runners:google-cloud-dataflow-java") sparkRunnerIntegrationTest project(":runners:spark:3") - sparkRunnerIntegrationTest "org.apache.spark:spark-sql_$spark_scala_version:$spark_version" - sparkRunnerIntegrationTest "org.apache.spark:spark-streaming_$spark_scala_version:$spark_version" } /* From f083b054a4b101cb4f2c27bc18604e01e4612e40 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Mon, 24 Oct 2022 09:18:26 +0200 Subject: [PATCH 031/115] Use Spark 3 job-server as default Spark job-server for PortableRunner (addresses #23728) (#23751) --- CHANGES.md | 2 ++ .../org/apache/beam/gradle/BeamModulePlugin.groovy | 8 +++----- sdks/go/test/build.gradle | 4 ++-- .../python/apache_beam/options/pipeline_options.py | 7 ++++--- .../runners/portability/spark_runner.py | 12 ++++++------ .../runners/portability/spark_runner_test.py | 2 +- .../portability/spark_uber_jar_job_server.py | 10 +++++----- sdks/python/test-suites/portable/common.gradle | 14 +++++++------- .../site/content/en/documentation/runners/spark.md | 2 +- 9 files changed, 31 insertions(+), 30 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 22cfe3f93d83..f1b8df5c5b25 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -85,6 +85,8 @@ ## Breaking Changes * Python SDK CoGroupByKey outputs an iterable allowing for arbitrarily large results. [#21556](https://github.com/apache/beam/issues/21556) Beam users may see an error on transforms downstream from CoGroupByKey. 
Users must change methods expecting a List to expect an Iterable going forward. See [document](https://docs.google.com/document/d/1RIzm8-g-0CyVsPb6yasjwokJQFoKHG4NjRUcKHKINu0) for information and fixes. +* The PortableRunner for Spark assumes Spark 3 as default Spark major version unless configured otherwise using `--spark_version`. + Spark 2 support is deprecated and will be removed soon ([#23728](https://github.com/apache/beam/issues/23728)). ## Deprecations diff --git a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy index 7f6ac755d6b6..99700b097bc8 100644 --- a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy +++ b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy @@ -1919,9 +1919,7 @@ class BeamModulePlugin implements Plugin { } if (runner?.equalsIgnoreCase('spark')) { - testRuntimeOnly it.project(path: ":runners:spark:2", configuration: "testRuntimeMigration") - testRuntimeOnly project.library.java.spark_core - testRuntimeOnly project.library.java.spark_streaming + testRuntimeOnly it.project(path: ":runners:spark:3", configuration: "testRuntimeMigration") // Testing the Spark runner causes a StackOverflowError if slf4j-jdk14 is on the classpath project.configurations.testRuntimeClasspath { @@ -2679,7 +2677,7 @@ class BeamModulePlugin implements Plugin { dependsOn = [installGcpTest] mustRunAfter = [ ":runners:flink:${project.ext.latestFlinkVersion}:job-server:shadowJar", - ':runners:spark:2:job-server:shadowJar', + ':runners:spark:3:job-server:shadowJar', ':sdks:python:container:py37:docker', ':sdks:python:container:py38:docker', ':sdks:python:container:py39:docker', @@ -2695,7 +2693,7 @@ class BeamModulePlugin implements Plugin { "--parallelism=2", "--sdk_worker_parallelism=1", "--flink_job_server_jar=${project.project(flinkJobServerProject).shadowJar.archivePath}", - "--spark_job_server_jar=${project.project(':runners:spark:2:job-server').shadowJar.archivePath}", + "--spark_job_server_jar=${project.project(':runners:spark:3:job-server').shadowJar.archivePath}", ] if (isStreaming) options += [ diff --git a/sdks/go/test/build.gradle b/sdks/go/test/build.gradle index 76acadb5db17..5d34f9c72c8a 100644 --- a/sdks/go/test/build.gradle +++ b/sdks/go/test/build.gradle @@ -104,7 +104,7 @@ task sparkValidatesRunner { dependsOn ":sdks:go:test:goBuild" dependsOn ":sdks:java:container:java8:docker" - dependsOn ":runners:spark:2:job-server:shadowJar" + dependsOn ":runners:spark:3:job-server:shadowJar" dependsOn ":sdks:java:testing:expansion-service:buildTestExpansionServiceJar" doLast { def pipelineOptions = [ // Pipeline options piped directly to Go SDK flags. 
@@ -112,7 +112,7 @@ task sparkValidatesRunner { ] def options = [ "--runner spark", - "--spark_job_server_jar ${project(":runners:spark:2:job-server").shadowJar.archivePath}", + "--spark_job_server_jar ${project(":runners:spark:3:job-server").shadowJar.archivePath}", "--pipeline_opts \"${pipelineOptions.join(' ')}\"", ] exec { diff --git a/sdks/python/apache_beam/options/pipeline_options.py b/sdks/python/apache_beam/options/pipeline_options.py index 54eaaf19ed8e..036613ca5469 100644 --- a/sdks/python/apache_beam/options/pipeline_options.py +++ b/sdks/python/apache_beam/options/pipeline_options.py @@ -1497,9 +1497,10 @@ def _add_argparse_args(cls, parser): 'For example, http://hostname:6066') parser.add_argument( '--spark_version', - default='2', - choices=['2', '3'], - help='Spark major version to use.') + default='3', + choices=['3', '2'], + help='Spark major version to use. ' + 'Note, Spark 2 support is deprecated') class TestOptions(PipelineOptions): diff --git a/sdks/python/apache_beam/runners/portability/spark_runner.py b/sdks/python/apache_beam/runners/portability/spark_runner.py index b1d754d89836..b4c46c0dac06 100644 --- a/sdks/python/apache_beam/runners/portability/spark_runner.py +++ b/sdks/python/apache_beam/runners/portability/spark_runner.py @@ -88,15 +88,15 @@ def path_to_jar(self): 'Unable to parse jar URL "%s". If using a full URL, make sure ' 'the scheme is specified. If using a local file path, make sure ' 'the file exists; you may have to first build the job server ' - 'using `./gradlew runners:spark:2:job-server:shadowJar`.' % + 'using `./gradlew runners:spark:3:job-server:shadowJar`.' % self._jar) return self._jar else: - if self._spark_version == '3': - return self.path_to_beam_jar(':runners:spark:3:job-server:shadowJar') - return self.path_to_beam_jar( - ':runners:spark:2:job-server:shadowJar', - artifact_id='beam-runners-spark-job-server') + if self._spark_version == '2': + return self.path_to_beam_jar( + ':runners:spark:2:job-server:shadowJar', + artifact_id='beam-runners-spark-job-server') + return self.path_to_beam_jar(':runners:spark:3:job-server:shadowJar') def java_arguments( self, job_port, artifact_port, expansion_port, artifacts_dir): diff --git a/sdks/python/apache_beam/runners/portability/spark_runner_test.py b/sdks/python/apache_beam/runners/portability/spark_runner_test.py index 488222f2f2fa..5530caa1e971 100644 --- a/sdks/python/apache_beam/runners/portability/spark_runner_test.py +++ b/sdks/python/apache_beam/runners/portability/spark_runner_test.py @@ -84,7 +84,7 @@ def parse_options(self, request): self.set_spark_job_server_jar( known_args.spark_job_server_jar or job_server.JavaJarJobServer.path_to_beam_jar( - ':runners:spark:2:job-server:shadowJar')) + ':runners:spark:3:job-server:shadowJar')) self.environment_type = known_args.environment_type self.environment_options = known_args.environment_options diff --git a/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py b/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py index 832f3142cb63..97fa6b629cee 100644 --- a/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py +++ b/sdks/python/apache_beam/runners/portability/spark_uber_jar_job_server.py @@ -69,17 +69,17 @@ def executable_jar(self): 'Unable to parse jar URL "%s". If using a full URL, make sure ' 'the scheme is specified. If using a local file path, make sure ' 'the file exists; you may have to first build the job server ' - 'using `./gradlew runners:spark:2:job-server:shadowJar`.' 
% + 'using `./gradlew runners:spark:3:job-server:shadowJar`.' % self._executable_jar) url = self._executable_jar else: - if self._spark_version == '3': - url = job_server.JavaJarJobServer.path_to_beam_jar( - ':runners:spark:3:job-server:shadowJar') - else: + if self._spark_version == '2': url = job_server.JavaJarJobServer.path_to_beam_jar( ':runners:spark:2:job-server:shadowJar', artifact_id='beam-runners-spark-job-server') + else: + url = job_server.JavaJarJobServer.path_to_beam_jar( + ':runners:spark:3:job-server:shadowJar') return job_server.JavaJarJobServer.local_jar(url) def create_beam_job(self, job_id, job_name, pipeline, options): diff --git a/sdks/python/test-suites/portable/common.gradle b/sdks/python/test-suites/portable/common.gradle index 79770e893256..73665759abbe 100644 --- a/sdks/python/test-suites/portable/common.gradle +++ b/sdks/python/test-suites/portable/common.gradle @@ -172,15 +172,15 @@ task samzaValidatesRunner() { def createSparkRunnerTestTask(String workerType) { def taskName = "sparkCompatibilityMatrix${workerType}" - // `project(':runners:spark:2:job-server').shadowJar.archivePath` is not resolvable until runtime, so hard-code it here. - def jobServerJar = "${rootDir}/runners/spark/2/job-server/build/libs/beam-runners-spark-job-server-${version}.jar" + // `project(':runners:spark:3:job-server').shadowJar.archivePath` is not resolvable until runtime, so hard-code it here. + def jobServerJar = "${rootDir}/runners/spark/3/job-server/build/libs/beam-runners-spark-3-job-server-${version}.jar" def options = "--spark_job_server_jar=${jobServerJar} --environment_type=${workerType}" if (workerType == 'PROCESS') { options += " --environment_options=process_command=${buildDir.absolutePath}/sdk_worker.sh" } def task = toxTask(taskName, 'spark-runner-test', options) task.configure { - dependsOn ':runners:spark:2:job-server:shadowJar' + dependsOn ':runners:spark:3:job-server:shadowJar' if (workerType == 'DOCKER') { dependsOn pythonContainerTask } else if (workerType == 'PROCESS') { @@ -208,7 +208,7 @@ project.tasks.register("preCommitPy${pythonVersionSuffix}") { project.tasks.register("postCommitPy${pythonVersionSuffix}") { dependsOn = ['setupVirtualenv', "postCommitPy${pythonVersionSuffix}IT", - ':runners:spark:2:job-server:shadowJar', + ':runners:spark:3:job-server:shadowJar', 'portableLocalRunnerJuliaSetWithSetupPy', 'portableWordCountSparkRunnerBatch', 'portableLocalRunnerTestWithRequirementsFile'] @@ -248,13 +248,13 @@ project.tasks.register("sparkExamples") { dependsOn = [ 'setupVirtualenv', 'installGcpTest', - ':runners:spark:2:job-server:shadowJar' + ':runners:spark:3:job-server:shadowJar' ] doLast { def testOpts = [ "--log-cli-level=INFO", ] - def jobServerJar = "${rootDir}/runners/spark/2/job-server/build/libs/beam-runners-spark-job-server-${version}.jar" + def jobServerJar = "${rootDir}/runners/spark/2/job-server/build/libs/beam-runners-spark-3-job-server-${version}.jar" def pipelineOpts = [ "--runner=SparkRunner", "--project=apache-beam-testing", @@ -388,7 +388,7 @@ def addTestJavaJarCreator(String runner, Task jobServerJarTask) { // TODO(BEAM-11333) Update and test multiple Flink versions. 
addTestJavaJarCreator("FlinkRunner", tasks.getByPath(":runners:flink:${latestFlinkVersion}:job-server:shadowJar")) -addTestJavaJarCreator("SparkRunner", tasks.getByPath(":runners:spark:2:job-server:shadowJar")) +addTestJavaJarCreator("SparkRunner", tasks.getByPath(":runners:spark:3:job-server:shadowJar")) def addTestFlinkUberJar(boolean saveMainSession) { project.tasks.register("testUberJarFlinkRunner${saveMainSession ? 'SaveMainSession' : ''}") { diff --git a/website/www/site/content/en/documentation/runners/spark.md b/website/www/site/content/en/documentation/runners/spark.md index ff6fa3cc47a8..b7283f0cbe1b 100644 --- a/website/www/site/content/en/documentation/runners/spark.md +++ b/website/www/site/content/en/documentation/runners/spark.md @@ -293,7 +293,7 @@ python -m apache_beam.examples.wordcount \ - `--runner`(required): `SparkRunner`. - `--output_executable_path`(required): path for the bundle jar to be created. - `--output`(required): where output shall be written. -- `--spark_version`(optional): select spark version 2 (default) or 3. +- `--spark_version`(optional): select spark version 3 (default) or 2 (deprecated!). 5. Submit spark job to Dataproc cluster's master node. From 1ec1945ec5c1d29dbc5efe574712733922ced07d Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Mon, 24 Oct 2022 09:19:14 +0200 Subject: [PATCH 032/115] Support usage of custom profileName with AWS ProfileCredentialsProvider (closes #23206) (#23553) --- .../beam/gradle/BeamModulePlugin.groovy | 1 + .../java/io/amazon-web-services2/build.gradle | 1 + .../beam/sdk/io/aws2/options/AwsModule.java | 25 ++- .../beam/sdk/io/aws2/options/AwsOptions.java | 29 +++- .../sdk/io/aws2/options/AwsModuleTest.java | 158 ++++++++++++++++-- .../aws2/options/SerializationTestUtil.java | 15 +- 6 files changed, 209 insertions(+), 20 deletions(-) diff --git a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy index 99700b097bc8..44c1011a7cfc 100644 --- a/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy +++ b/buildSrc/src/main/groovy/org/apache/beam/gradle/BeamModulePlugin.groovy @@ -546,6 +546,7 @@ class BeamModulePlugin implements Plugin { aws_java_sdk2_http_client_spi : "software.amazon.awssdk:http-client-spi:$aws_java_sdk2_version", aws_java_sdk2_regions : "software.amazon.awssdk:regions:$aws_java_sdk2_version", aws_java_sdk2_utils : "software.amazon.awssdk:utils:$aws_java_sdk2_version", + aws_java_sdk2_profiles : "software.amazon.awssdk:profiles:$aws_java_sdk2_version", bigdataoss_gcsio : "com.google.cloud.bigdataoss:gcsio:$google_cloud_bigdataoss_version", bigdataoss_util : "com.google.cloud.bigdataoss:util:$google_cloud_bigdataoss_version", byte_buddy : "net.bytebuddy:byte-buddy:1.12.14", diff --git a/sdks/java/io/amazon-web-services2/build.gradle b/sdks/java/io/amazon-web-services2/build.gradle index 1c5d3dc82683..5b25cde8f0e0 100644 --- a/sdks/java/io/amazon-web-services2/build.gradle +++ b/sdks/java/io/amazon-web-services2/build.gradle @@ -48,6 +48,7 @@ dependencies { implementation library.java.aws_java_sdk2_auth, excludeNetty implementation library.java.aws_java_sdk2_regions, excludeNetty implementation library.java.aws_java_sdk2_utils, excludeNetty + implementation library.java.aws_java_sdk2_profiles, excludeNetty implementation library.java.aws_java_sdk2_http_client_spi, excludeNetty implementation library.java.aws_java_sdk2_apache_client, excludeNetty implementation 
library.java.aws_java_sdk2_netty_client, excludeNetty diff --git a/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsModule.java b/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsModule.java index 0f8b138d0b95..d814b395950a 100644 --- a/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsModule.java +++ b/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsModule.java @@ -49,6 +49,7 @@ import org.apache.beam.sdk.annotations.Experimental.Kind; import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableSet; import org.checkerframework.checker.nullness.qual.NonNull; +import org.slf4j.LoggerFactory; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; @@ -60,6 +61,7 @@ import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider; import software.amazon.awssdk.http.apache.ProxyConfiguration; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider; @@ -75,6 +77,7 @@ public class AwsModule extends SimpleModule { private static final String ACCESS_KEY_ID = "accessKeyId"; private static final String SECRET_ACCESS_KEY = "secretAccessKey"; private static final String SESSION_TOKEN = "sessionToken"; + private static final String PROFILE_NAME = "profileName"; public AwsModule() { super("AwsModule"); @@ -160,7 +163,9 @@ public AwsCredentialsProvider deserializeWithType( } else if (hasName(SystemPropertyCredentialsProvider.class, typeName)) { return SystemPropertyCredentialsProvider.create(); } else if (hasName(ProfileCredentialsProvider.class, typeName)) { - return ProfileCredentialsProvider.create(); + return json.has(PROFILE_NAME) + ? 
ProfileCredentialsProvider.create(getNotNull(json, PROFILE_NAME, typeName)) + : ProfileCredentialsProvider.create(); } else if (hasName(ContainerCredentialsProvider.class, typeName)) { return ContainerCredentialsProvider.builder().build(); } else if (typeName.equals(StsAssumeRoleCredentialsProvider.class.getSimpleName())) { @@ -195,7 +200,6 @@ private static class AWSCredentialsProviderSerializer DefaultCredentialsProvider.class, EnvironmentVariableCredentialsProvider.class, SystemPropertyCredentialsProvider.class, - ProfileCredentialsProvider.class, ContainerCredentialsProvider.class); @Override @@ -228,6 +232,23 @@ public void serializeWithType( jsonGenerator.writeStringField(ACCESS_KEY_ID, credentials.accessKeyId()); jsonGenerator.writeStringField(SECRET_ACCESS_KEY, credentials.secretAccessKey()); } + } else if (providerClass.equals(ProfileCredentialsProvider.class)) { + String profileName = (String) readField(credentialsProvider, PROFILE_NAME); + String envProfileName = ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); + if (profileName != null && !profileName.equals(envProfileName)) { + jsonGenerator.writeStringField(PROFILE_NAME, profileName); + } + try { + Exception exception = (Exception) readField(credentialsProvider, "loadException"); + if (exception != null) { + LoggerFactory.getLogger(AwsModule.class) + .warn("Serialized ProfileCredentialsProvider in faulty state.", exception); + } + } catch (RuntimeException e) { + LoggerFactory.getLogger(AwsModule.class) + .warn("Failed to check ProfileCredentialsProvider for loadException.", e); + } + } else if (providerClass.equals(StsAssumeRoleCredentialsProvider.class)) { Supplier reqSupplier = (Supplier) diff --git a/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsOptions.java b/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsOptions.java index d2e02217d15a..ae86c27b78e2 100644 --- a/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsOptions.java +++ b/sdks/java/io/amazon-web-services2/src/main/java/org/apache/beam/sdk/io/aws2/options/AwsOptions.java @@ -78,21 +78,20 @@ public Region create(PipelineOptions options) { *

The class name of the provider must be set in the {@code @type} field. Note: Not all * available providers are supported and some configuration options might be ignored. * - *

Most providers rely on system's environment to follow AWS conventions, there's no further - * configuration: + *

Most providers must use the system environment following AWS conventions. Programmatic + * configuration for these providers is NOT supported: *

  • {@link DefaultCredentialsProvider} *
  • {@link EnvironmentVariableCredentialsProvider} *
  • {@link SystemPropertyCredentialsProvider} - *
  • {@link ProfileCredentialsProvider} *
  • {@link ContainerCredentialsProvider} * *

    Example: * - *

    {@code --awsCredentialsProvider={"@type": "ProfileCredentialsProvider"}}
    + *
    {@code --awsCredentialsProvider={"@type": "EnvironmentVariableCredentialsProvider"}}
    +   *     
    * - *

    Some other providers require additional configuration: + *

    Some other providers support additional configuration: *

  • {@link StaticCredentialsProvider} - *
  • {@link StsAssumeRoleCredentialsProvider} * *

    Examples: * @@ -107,9 +106,27 @@ public Region create(PipelineOptions options) { * "awsAccessKeyId": "key_id_value", * "awsSecretKey": "secret_value", * "sessionToken": "token_value" + * }} + * + *

  • {@link ProfileCredentialsProvider} + * + *

{@code profileName} is optional; if not set, the environment default is used. Be careful
+ * when using this provider programmatically, as it can behave unexpectedly.
+ *
+ *

    Examples: + * + *

    {@code --awsCredentialsProvider={
    +   *   "@type": "ProfileCredentialsProvider"
        * }
        *
        * --awsCredentialsProvider={
    +   *   "@type": "ProfileCredentialsProvider",
    +   *   "profileName": "my_profile"
    +   * }}
    + * + *
  • {@link StsAssumeRoleCredentialsProvider} + * + *
    {@code --awsCredentialsProvider={
        *   "@type": "StsAssumeRoleCredentialsProvider",
        *   "roleArn": "role_arn_Value",
        *   "roleSessionName": "session_name_value",
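
For readers following the javadoc change above, a minimal sketch of how the new profileName
support is consumed from user code (the flag value is illustrative; the JSON shape follows the
javadoc examples, and PipelineOptionsFactory is the standard Beam entry point for options):

    // Parse pipeline arguments into AwsOptions and resolve the configured provider.
    AwsOptions options =
        PipelineOptionsFactory.fromArgs(
                "--awsCredentialsProvider="
                    + "{\"@type\": \"ProfileCredentialsProvider\", \"profileName\": \"my_profile\"}")
            .as(AwsOptions.class);
    AwsCredentialsProvider credentialsProvider = options.getAwsCredentialsProvider();
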
    diff --git a/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/AwsModuleTest.java b/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/AwsModuleTest.java
    index e5962812e64b..17e6f528f969 100644
    --- a/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/AwsModuleTest.java
    +++ b/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/AwsModuleTest.java
    @@ -18,6 +18,7 @@
     package org.apache.beam.sdk.io.aws2.options;
     
     import static org.apache.beam.repackaged.core.org.apache.commons.lang3.reflect.FieldUtils.readField;
    +import static org.apache.beam.sdk.io.aws2.options.SerializationTestUtil.serialize;
     import static org.assertj.core.api.Assertions.assertThat;
     import static org.hamcrest.Matchers.hasItem;
     import static org.hamcrest.Matchers.instanceOf;
    @@ -25,21 +26,32 @@
     import static software.amazon.awssdk.core.SdkSystemSetting.AWS_ACCESS_KEY_ID;
     import static software.amazon.awssdk.core.SdkSystemSetting.AWS_REGION;
     import static software.amazon.awssdk.core.SdkSystemSetting.AWS_SECRET_ACCESS_KEY;
    +import static software.amazon.awssdk.profiles.ProfileFileSystemSetting.AWS_CONFIG_FILE;
    +import static software.amazon.awssdk.profiles.ProfileFileSystemSetting.AWS_PROFILE;
     
     import com.amazonaws.regions.Regions;
     import com.fasterxml.jackson.databind.Module;
     import com.fasterxml.jackson.databind.ObjectMapper;
    +import java.io.IOException;
     import java.net.URI;
    +import java.nio.file.Files;
    +import java.nio.file.Path;
    +import java.util.Arrays;
     import java.util.List;
     import java.util.Properties;
     import java.util.function.Supplier;
    +import org.apache.beam.sdk.testing.ExpectedLogs;
     import org.apache.beam.sdk.util.ThrowingSupplier;
     import org.apache.beam.sdk.util.common.ReflectHelpers;
     import org.hamcrest.MatcherAssert;
    +import org.junit.ClassRule;
    +import org.junit.Rule;
     import org.junit.Test;
    +import org.junit.rules.ExternalResource;
     import org.junit.runner.RunWith;
     import org.junit.runners.JUnit4;
     import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
    +import software.amazon.awssdk.auth.credentials.AwsCredentials;
     import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
     import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;
     import software.amazon.awssdk.auth.credentials.ContainerCredentialsProvider;
    @@ -57,6 +69,24 @@
     @RunWith(JUnit4.class)
     public class AwsModuleTest {
     
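+  // Shared AWS config file with a default profile and one named profile ("other"),
+  // created once for the whole test class.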
    +  @ClassRule
    +  public static final ProfileFile PROFILE =
    +      new ProfileFile(
    +          "[default]",
    +          "aws_access_key_id=defaultkey",
    +          "aws_secret_access_key=123",
    +          "[profile other]",
    +          "aws_access_key_id=otherkey",
    +          "aws_secret_access_key=abc");
    +
    +  private static final AwsCredentials DEFAULT_CREDENTIALS =
    +      AwsBasicCredentials.create("defaultkey", "123");
    +
    +  private static final AwsCredentials OTHER_CREDENTIALS =
    +      AwsBasicCredentials.create("otherkey", "abc");
    +
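+  // Captures AwsModule log output so individual tests can assert on emitted warnings.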
    +  @Rule public final ExpectedLogs logs = ExpectedLogs.none(AwsModule.class);
    +
       @Test
       public void testObjectMapperIsAbleToFindModule() {
     List modules = ObjectMapper.findModules(ReflectHelpers.findClassLoader());
@@ -68,7 +98,7 @@ private  T serializeAndDeserialize(T obj) {
       }
     
       @Test
    -  public void testStaticCredentialsProviderSerializationDeserialization() {
    +  public void testStaticCredentialsProviderSerDe() {
         AwsCredentialsProvider provider =
             StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "secret"));
     
    @@ -84,7 +114,7 @@ public void testStaticCredentialsProviderSerializationDeserialization() {
       }
     
       @Test
    -  public void testAwsCredentialsProviderSerializationDeserialization() {
    +  public void testAwsCredentialsProviderSerDe() {
         AwsCredentialsProvider provider = DefaultCredentialsProvider.create();
         AwsCredentialsProvider deserializedProvider = serializeAndDeserialize(provider);
         assertEquals(provider.getClass(), deserializedProvider.getClass());
    @@ -97,17 +127,90 @@ public void testAwsCredentialsProviderSerializationDeserialization() {
         deserializedProvider = serializeAndDeserialize(provider);
         assertEquals(provider.getClass(), deserializedProvider.getClass());
     
    -    provider = ProfileCredentialsProvider.create();
    -    deserializedProvider = serializeAndDeserialize(provider);
    -    assertEquals(provider.getClass(), deserializedProvider.getClass());
    -
         provider = ContainerCredentialsProvider.builder().build();
         deserializedProvider = serializeAndDeserialize(provider);
         assertEquals(provider.getClass(), deserializedProvider.getClass());
       }
     
       @Test
    -  public void testStsAssumeRoleCredentialsProviderSerializationDeserialization() throws Exception {
    +  public void testProfileCredentialsProviderSerDeWithDefaultProfile() throws Exception {
    +    withSystemProperties(
    +        PROFILE.properties("default"),
    +        () -> {
    +          AwsCredentialsProvider provider = ProfileCredentialsProvider.create();
    +          String serializedProvider = serialize(provider);
    +
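+          // The profile matches the environment default, so profileName is omitted from JSON.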
    +          assertThat(serializedProvider).isEqualTo("{\"@type\":\"ProfileCredentialsProvider\"}");
    +
    +          AwsCredentialsProvider actual = deserialize(serializedProvider);
    +          assertThat(actual.resolveCredentials())
    +              .isEqualToComparingFieldByField(DEFAULT_CREDENTIALS);
    +          return assertThat(actual)
    +              .isExactlyInstanceOf(ProfileCredentialsProvider.class)
    +              .isEqualToComparingFieldByFieldRecursively(provider);
    +        });
    +  }
    +
    +  @Test
    +  public void testProfileCredentialsProviderSerDeWithCustomProfile() throws Exception {
    +    withSystemProperties(
    +        PROFILE.properties("default"),
    +        () -> {
    +          AwsCredentialsProvider provider = ProfileCredentialsProvider.create("other");
    +          String serializedProvider = serialize(provider);
    +
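+          // A profile other than the environment default is serialized explicitly.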
    +          assertThat(serializedProvider)
    +              .isEqualTo("{\"@type\":\"ProfileCredentialsProvider\",\"profileName\":\"other\"}");
    +
    +          AwsCredentialsProvider actual = deserialize(serializedProvider);
    +          assertThat(actual.resolveCredentials()).isEqualToComparingFieldByField(OTHER_CREDENTIALS);
    +          return assertThat(actual)
    +              .isExactlyInstanceOf(ProfileCredentialsProvider.class)
    +              .isEqualToComparingFieldByFieldRecursively(provider);
    +        });
    +  }
    +
    +  @Test
    +  public void testProfileCredentialsProviderSerDeWithCustomDefaultProfile() throws Exception {
    +    withSystemProperties(
    +        PROFILE.properties("other"),
    +        () -> {
    +          AwsCredentialsProvider provider = ProfileCredentialsProvider.create("other");
    +          String serializedProvider = serialize(provider);
    +
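+          // "other" is also the AWS_PROFILE environment default here, so profileName can be omitted.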
    +          assertThat(serializedProvider).isEqualTo("{\"@type\":\"ProfileCredentialsProvider\"}");
    +
    +          AwsCredentialsProvider actual = deserialize(serializedProvider);
    +          assertThat(actual.resolveCredentials())
    +              .isEqualToComparingFieldByFieldRecursively(OTHER_CREDENTIALS);
    +          return assertThat(actual)
    +              .isExactlyInstanceOf(ProfileCredentialsProvider.class)
    +              .isEqualToComparingFieldByFieldRecursively(provider);
    +        });
    +  }
    +
    +  @Test
    +  public void testProfileCredentialsProviderSerDeWithUnknownProfile() throws Exception {
    +    withSystemProperties(
    +        PROFILE.properties("default"),
    +        () -> {
    +          AwsCredentialsProvider provider = ProfileCredentialsProvider.create("unknown");
    +          String serializedProvider = serialize(provider);
    +
    +          // ProfileCredentialsProvider SILENTLY drops unknown profiles
    +          assertThat(serializedProvider).isEqualTo("{\"@type\":\"ProfileCredentialsProvider\"}");
    +
    +          AwsCredentialsProvider actual = deserialize(serializedProvider);
    +          // NOTE: This documents the unexpected behavior in case a faulty provider is serialized
    +          return assertThat(actual.resolveCredentials())
    +              .isEqualToComparingFieldByField(DEFAULT_CREDENTIALS);
    +        });
    +
    +    logs.verifyWarn("Serialized ProfileCredentialsProvider in faulty state.");
    +  }
    +
    +  @Test
    +  public void testStsAssumeRoleCredentialsProviderSerDe() throws Exception {
         AssumeRoleRequest req = AssumeRoleRequest.builder().roleArn("roleArn").policy("policy").build();
     Supplier<AwsCredentialsProvider> provider =
             () ->
    @@ -123,7 +226,7 @@ public void testStsAssumeRoleCredentialsProviderSerializationDeserialization() t
     
         // Region and credentials for STS client are resolved using default providers
         AwsCredentialsProvider deserializedProvider =
    -        withSystemPropertyOverrides(overrides, () -> serializeAndDeserialize(provider.get()));
    +        withSystemProperties(overrides, () -> serializeAndDeserialize(provider.get()));
     
     Supplier<AssumeRoleRequest> requestSupplier =
         (Supplier<AssumeRoleRequest>)
    @@ -132,7 +235,7 @@ public void testStsAssumeRoleCredentialsProviderSerializationDeserialization() t
       }
     
       @Test
    -  public void testProxyConfigurationSerializationDeserialization() {
    +  public void testProxyConfigurationSerDe() {
         ProxyConfiguration proxyConfiguration =
             ProxyConfiguration.builder()
                 .endpoint(URI.create("http://localhost:8080"))
    @@ -147,7 +250,7 @@ public void testProxyConfigurationSerializationDeserialization() {
         assertEquals("password", deserializedProxyConfiguration.password());
       }
     
     -  private <T> T withSystemPropertyOverrides(Properties overrides, ThrowingSupplier<T> fun)
     +  private <T> T withSystemProperties(Properties overrides, ThrowingSupplier<T> fun)
           throws Exception {
         Properties systemProps = System.getProperties();
     
     @@ -164,4 +267,39 @@ private <T> T withSystemPropertyOverrides(Properties overrides, ThrowingSupplier
           previousProps.forEach(systemProps::put);
         }
       }
    +
    +  private static AwsCredentialsProvider deserialize(String provider) {
    +    return SerializationTestUtil.deserialize(provider, AwsCredentialsProvider.class);
    +  }
    +
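     +  /** Writes a temporary AWS profile file to disk and exposes system properties pointing the SDK at it. */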
    +  static class ProfileFile extends ExternalResource {
    +    private String[] lines;
    +    private Path path;
    +
    +    public ProfileFile(String... lines) {
    +      this.lines = lines;
    +    }
    +
    +    public Properties properties(String defaultProfile) {
    +      Properties props = new Properties();
    +      props.setProperty(AWS_CONFIG_FILE.property(), path.toString());
    +      props.setProperty(AWS_PROFILE.property(), defaultProfile);
    +      return props;
    +    }
    +
    +    @Override
    +    protected void before() throws Throwable {
    +      path = Files.createTempFile("profile", ".conf");
    +      Files.write(path, Arrays.asList(lines));
    +    }
    +
    +    @Override
    +    protected void after() {
    +      try {
    +        Files.delete(path);
    +      } catch (IOException e) {
    +        // ignore
    +      }
    +    }
    +  }
     }
    diff --git a/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/SerializationTestUtil.java b/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/SerializationTestUtil.java
    index 0f5daf0bc92c..6cf79c958090 100644
    --- a/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/SerializationTestUtil.java
    +++ b/sdks/java/io/amazon-web-services2/src/test/java/org/apache/beam/sdk/io/aws2/options/SerializationTestUtil.java
    @@ -28,11 +28,22 @@ public class SerializationTestUtil {
               .registerModules(ObjectMapper.findModules(ReflectHelpers.findClassLoader()));
     
       public static <T> T serializeDeserialize(Class<T> clazz, T obj) {
    +    return deserialize(serialize(obj), clazz);
    +  }
    +
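     +  /** Serializes {@code obj} to a JSON string using the shared test {@code ObjectMapper}. */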
     +  public static <T> String serialize(T obj) {
    +    try {
    +      return MAPPER.writeValueAsString(obj);
    +    } catch (JsonProcessingException e) {
    +      throw new RuntimeException("Failed to serialize " + obj.getClass().getSimpleName(), e);
    +    }
    +  }
    +
     +  public static <T> T deserialize(String jsonString, Class<T> clazz) {
         try {
    -      String jsonString = MAPPER.writeValueAsString(obj);
           return MAPPER.readValue(jsonString, clazz);
         } catch (JsonProcessingException e) {
    -      throw new RuntimeException("Failed to serialize/deserialize " + clazz.getSimpleName(), e);
    +      throw new RuntimeException("Failed to deserialize " + clazz.getSimpleName(), e);
         }
       }
     }
    
    From 760c83e94020971d84b904270b087d4c3d69fd00 Mon Sep 17 00:00:00 2001
    From: Moritz Mack 
    Date: Mon, 24 Oct 2022 10:30:30 +0200
    Subject: [PATCH 033/115] Migrate examples and maven-archetypes (including Java
     Quickstart) to Spark 3 (addresses #23728) (#23730)
    
    ---
     examples/java/build.gradle                    |  7 +----
     examples/kotlin/build.gradle                  |  7 +----
     release/build.gradle.kts                      |  2 +-
     runners/spark/3/build.gradle                  |  3 ++
     runners/spark/spark_runner.gradle             |  3 --
     .../maven-archetypes/examples/build.gradle    |  2 +-
     .../resources/archetype-resources/pom.xml     | 28 +++----------------
     .../gcp-bom-examples/build.gradle             |  2 +-
     .../resources/archetype-resources/pom.xml     | 24 ++--------------
     9 files changed, 15 insertions(+), 63 deletions(-)
    
    diff --git a/examples/java/build.gradle b/examples/java/build.gradle
    index 13b2518bf382..aa51dcfeae85 100644
    --- a/examples/java/build.gradle
    +++ b/examples/java/build.gradle
    @@ -109,13 +109,8 @@ dependencies {
       }
       directRunnerPreCommit project(path: ":runners:direct-java", configuration: "shadow")
       flinkRunnerPreCommit project(":runners:flink:${project.ext.latestFlinkVersion}")
    -  // TODO: Make the netty version used configurable, we add netty-all 4.1.17.Final so it appears on the classpath
    -  // before 4.1.8.Final defined by Apache Beam
    -  sparkRunnerPreCommit "io.netty:netty-all:4.1.17.Final"
    -  sparkRunnerPreCommit project(":runners:spark:2")
    +  sparkRunnerPreCommit project(":runners:spark:3")
       sparkRunnerPreCommit project(":sdks:java:io:hadoop-file-system")
    -  sparkRunnerPreCommit library.java.spark_streaming
    -  sparkRunnerPreCommit library.java.spark_core
     }
     
     /*
    diff --git a/examples/kotlin/build.gradle b/examples/kotlin/build.gradle
    index 0aa3dc257b09..79a1248712d0 100644
    --- a/examples/kotlin/build.gradle
    +++ b/examples/kotlin/build.gradle
    @@ -81,13 +81,8 @@ dependencies {
       }
       directRunnerPreCommit project(path: ":runners:direct-java", configuration: "shadow")
       flinkRunnerPreCommit project(":runners:flink:${project.ext.latestFlinkVersion}")
    -  // TODO: Make the netty version used configurable, we add netty-all 4.1.17.Final so it appears on the classpath
    -  // before 4.1.8.Final defined by Apache Beam
    -  sparkRunnerPreCommit "io.netty:netty-all:4.1.17.Final"
    -  sparkRunnerPreCommit project(":runners:spark:2")
    +  sparkRunnerPreCommit project(":runners:spark:3")
       sparkRunnerPreCommit project(":sdks:java:io:hadoop-file-system")
    -  sparkRunnerPreCommit library.java.spark_streaming
    -  sparkRunnerPreCommit library.java.spark_core
     }
     
     /*
    diff --git a/release/build.gradle.kts b/release/build.gradle.kts
    index 7de4ab3af61a..ce895af80f8b 100644
    --- a/release/build.gradle.kts
    +++ b/release/build.gradle.kts
    @@ -38,7 +38,7 @@ task("runJavaExamplesValidationTask") {
       description = "Run the Beam quickstart across all Java runners"
       dependsOn(":runners:direct-java:runQuickstartJavaDirect")
       dependsOn(":runners:google-cloud-dataflow-java:runQuickstartJavaDataflow")
    -  dependsOn(":runners:spark:2:runQuickstartJavaSpark")
    +  dependsOn(":runners:spark:3:runQuickstartJavaSpark")
       dependsOn(":runners:flink:1.13:runQuickstartJavaFlinkLocal")
       dependsOn(":runners:direct-java:runMobileGamingJavaDirect")
       dependsOn(":runners:google-cloud-dataflow-java:runMobileGamingJavaDataflow")
    diff --git a/runners/spark/3/build.gradle b/runners/spark/3/build.gradle
    index 3d59bd525c4b..494d367131b4 100644
    --- a/runners/spark/3/build.gradle
    +++ b/runners/spark/3/build.gradle
    @@ -29,6 +29,9 @@ project.ext {
     // Load the main build script which contains all build logic.
     apply from: "$basePath/spark_runner.gradle"
     
    +// Generates runQuickstartJavaSpark task (can only support 1 version of Spark)
    +createJavaExamplesArchetypeValidationTask(type: 'Quickstart', runner: 'Spark')
    +
     // Additional supported Spark versions (used in compatibility tests)
     def sparkVersions = [
         "330": "3.3.0",
    diff --git a/runners/spark/spark_runner.gradle b/runners/spark/spark_runner.gradle
    index 14a433162fb6..1869f9c21742 100644
    --- a/runners/spark/spark_runner.gradle
    +++ b/runners/spark/spark_runner.gradle
    @@ -385,9 +385,6 @@ tasks.register("validatesRunner") {
       //dependsOn validatesStructuredStreamingRunnerBatch
     }
     
    -// Generates :runners:spark:*:runQuickstartJavaSpark task
    -createJavaExamplesArchetypeValidationTask(type: 'Quickstart', runner: 'Spark')
    -
     tasks.register("hadoopVersionsTest") {
       group = "Verification"
       dependsOn hadoopVersions.collect{k,v -> "hadoopVersion${k}Test"}
    diff --git a/sdks/java/maven-archetypes/examples/build.gradle b/sdks/java/maven-archetypes/examples/build.gradle
    index 148015f43898..6a034029f10e 100644
    --- a/sdks/java/maven-archetypes/examples/build.gradle
    +++ b/sdks/java/maven-archetypes/examples/build.gradle
    @@ -36,7 +36,7 @@ processResources {
         'libraries-bom.version': dependencies.create(project.library.java.google_cloud_platform_libraries_bom).getVersion(),
         'pubsub.version': dependencies.create(project.library.java.google_api_services_pubsub).getVersion(),
         'slf4j.version': dependencies.create(project.library.java.slf4j_api).getVersion(),
    -    'spark.version': dependencies.create(project.library.java.spark_core).getVersion(),
    +    'spark.version': dependencies.create(project.library.java.spark3_core).getVersion(),
         'nemo.version': dependencies.create(project.library.java.nemo_compiler_frontend_beam).getVersion(),
         'hadoop.version': dependencies.create(project.library.java.hadoop_client).getVersion(),
         'mockito.version': dependencies.create(project.library.java.mockito_core).getVersion(),
    diff --git a/sdks/java/maven-archetypes/examples/src/main/resources/archetype-resources/pom.xml b/sdks/java/maven-archetypes/examples/src/main/resources/archetype-resources/pom.xml
    index 50515b812078..5560ca93257e 100644
    --- a/sdks/java/maven-archetypes/examples/src/main/resources/archetype-resources/pom.xml
    +++ b/sdks/java/maven-archetypes/examples/src/main/resources/archetype-resources/pom.xml
     @@ -220,15 +220,11 @@
      
         <profile>
           <id>spark-runner</id>
     -      <!-- Makes the SparkRunner available when running a pipeline. Additionally,
     -           overrides some Spark dependencies to Beam-compatible versions. -->
     -      <properties>
     -        <netty.version>4.1.17.Final</netty.version>
     -      </properties>
     +      <!-- Makes the SparkRunner available when running a pipeline. -->
           <dependencies>
             <dependency>
               <groupId>org.apache.beam</groupId>
     -          <artifactId>beam-runners-spark</artifactId>
     +          <artifactId>beam-runners-spark-3</artifactId>
               <version>${beam.version}</version>
               <scope>runtime</scope>
               <exclusions>
     @@ -246,7 +242,7 @@
             </dependency>
             <dependency>
               <groupId>org.apache.spark</groupId>
     -          <artifactId>spark-streaming_2.11</artifactId>
     +          <artifactId>spark-streaming_2.12</artifactId>
               <version>${spark.version}</version>
               <scope>runtime</scope>
               <exclusions>
     @@ -258,26 +254,10 @@
             </dependency>
             <dependency>
               <groupId>com.fasterxml.jackson.module</groupId>
     -          <artifactId>jackson-module-scala_2.11</artifactId>
     +          <artifactId>jackson-module-scala_2.12</artifactId>
               <version>${jackson.version}</version>
               <scope>runtime</scope>
             </dependency>
     -        <!-- Excludes netty artifacts pulled in via GCP IO that conflict with Spark's netty version. -->
     -        <dependency>
     -          <groupId>org.apache.beam</groupId>
     -          <artifactId>beam-sdks-java-io-google-cloud-platform</artifactId>
     -          <version>${beam.version}</version>
     -          <exclusions>
     -            <exclusion>
     -              <groupId>io.grpc</groupId>
     -              <artifactId>grpc-netty</artifactId>
     -            </exclusion>
     -            <exclusion>
     -              <groupId>io.netty</groupId>
     -              <artifactId>netty-handler</artifactId>
     -            </exclusion>
     -          </exclusions>
     -        </dependency>
           </dependencies>
         </profile>
         <profile>
    diff --git a/sdks/java/maven-archetypes/gcp-bom-examples/build.gradle b/sdks/java/maven-archetypes/gcp-bom-examples/build.gradle
    index 0e4f394170e5..af06bfc41d8e 100644
    --- a/sdks/java/maven-archetypes/gcp-bom-examples/build.gradle
    +++ b/sdks/java/maven-archetypes/gcp-bom-examples/build.gradle
    @@ -35,7 +35,7 @@ processResources {
                 'junit.version': dependencies.create(project.library.java.junit).getVersion(),
                 'pubsub.version': dependencies.create(project.library.java.google_api_services_pubsub).getVersion(),
                 'slf4j.version': dependencies.create(project.library.java.slf4j_api).getVersion(),
    -            'spark.version': dependencies.create(project.library.java.spark_core).getVersion(),
    +            'spark.version': dependencies.create(project.library.java.spark3_core).getVersion(),
                 'nemo.version': dependencies.create(project.library.java.nemo_compiler_frontend_beam).getVersion(),
                 'hadoop.version': dependencies.create(project.library.java.hadoop_client).getVersion(),
                 'mockito.version': dependencies.create(project.library.java.mockito_core).getVersion(),
    diff --git a/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml b/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml
    index 863a465f0fdc..c3fb0f26fcb2 100644
    --- a/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml
    +++ b/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml
     @@ -216,13 +216,10 @@
           <id>spark-runner</id>
           <!-- Makes the SparkRunner available when running a pipeline. Additionally,
                overrides some Spark dependencies to Beam-compatible versions. -->
     -      <properties>
     -        <netty.version>4.1.17.Final</netty.version>
     -      </properties>
           <dependencies>
             <dependency>
               <groupId>org.apache.beam</groupId>
     -          <artifactId>beam-runners-spark</artifactId>
     +          <artifactId>beam-runners-spark-3</artifactId>
               <scope>runtime</scope>
               <exclusions>
                 <exclusion>
     @@ -238,7 +235,7 @@
             </dependency>
             <dependency>
               <groupId>org.apache.spark</groupId>
     -          <artifactId>spark-streaming_2.11</artifactId>
     +          <artifactId>spark-streaming_2.12</artifactId>
               <scope>runtime</scope>
               <exclusions>
                 <exclusion>
     @@ -249,25 +246,10 @@
             </dependency>
             <dependency>
               <groupId>com.fasterxml.jackson.module</groupId>
     -          <artifactId>jackson-module-scala_2.11</artifactId>
     +          <artifactId>jackson-module-scala_2.12</artifactId>
               <version>${jackson.version}</version>
               <scope>runtime</scope>
             </dependency>
     -        <!-- Excludes netty artifacts pulled in via GCP IO that conflict with Spark's netty version. -->
     -        <dependency>
     -          <groupId>org.apache.beam</groupId>
     -          <artifactId>beam-sdks-java-io-google-cloud-platform</artifactId>
     -          <exclusions>
     -            <exclusion>
     -              <groupId>io.grpc</groupId>
     -              <artifactId>grpc-netty</artifactId>
     -            </exclusion>
     -            <exclusion>
     -              <groupId>io.netty</groupId>
     -              <artifactId>netty-handler</artifactId>
     -            </exclusion>
     -          </exclusions>
     -        </dependency>
           </dependencies>
         </profile>
         <profile>
    
    From 932f87e73329d7d914b23d05bd15d06a6e58132d Mon Sep 17 00:00:00 2001
    From: Moritz Mack 
    Date: Mon, 24 Oct 2022 11:50:12 +0200
    Subject: [PATCH 034/115] Bump dropwizard metrics-core for Spark 3 runner to
     match the version used in Spark 3.1 (addresses #23728)
    
    ---
     runners/spark/spark_runner.gradle | 3 ++-
     1 file changed, 2 insertions(+), 1 deletion(-)
    
    diff --git a/runners/spark/spark_runner.gradle b/runners/spark/spark_runner.gradle
    index 1869f9c21742..f8c0a061b0d7 100644
    --- a/runners/spark/spark_runner.gradle
    +++ b/runners/spark/spark_runner.gradle
    @@ -152,15 +152,16 @@ dependencies {
       implementation project(":sdks:java:fn-execution")
       implementation library.java.vendored_grpc_1_48_1
       implementation library.java.vendored_guava_26_0_jre
    -  implementation "io.dropwizard.metrics:metrics-core:3.1.5" // version used by Spark 2.4
       spark.components.each { component ->
         provided "$component:$spark_version"
       }
       permitUnusedDeclared "org.apache.spark:spark-network-common_$spark_scala_version:$spark_version"
       if (project.property("spark_scala_version").equals("2.11")) {
    +    implementation "io.dropwizard.metrics:metrics-core:3.1.5" // version used by Spark 2.4
         compileOnly "org.scala-lang:scala-library:2.11.12"
         runtimeOnly library.java.jackson_module_scala_2_11
       } else {
    +    implementation "io.dropwizard.metrics:metrics-core:4.1.1" // version used by Spark 3.1
         compileOnly "org.scala-lang:scala-library:2.12.15"
         runtimeOnly library.java.jackson_module_scala_2_12
       }
    
    From 33060c8dc312c1cf745819f87811e11cbbaafe22 Mon Sep 17 00:00:00 2001
    From: Moritz Mack 
    Date: Mon, 24 Oct 2022 11:58:18 +0200
    Subject: [PATCH 035/115] Update remaining pointers to Spark runner to Spark 3
     module (addresses #23728)
    
    ---
     website/www/site/content/en/contribute/release-guide.md       | 2 +-
     .../content/en/documentation/sdks/java/testing/nexmark.md     | 4 ++--
     2 files changed, 3 insertions(+), 3 deletions(-)
    
    diff --git a/website/www/site/content/en/contribute/release-guide.md b/website/www/site/content/en/contribute/release-guide.md
    index c5ea4442145b..a243659d9f8d 100644
    --- a/website/www/site/content/en/contribute/release-guide.md
    +++ b/website/www/site/content/en/contribute/release-guide.md
    @@ -877,7 +877,7 @@ _Note_: -Prepourl and -Pver can be found in the RC vote email sent by Release Ma
       ```
       **Spark Local Runner**
       ```
    -  ./gradlew :runners:spark:2:runQuickstartJavaSpark \
    +  ./gradlew :runners:spark:3:runQuickstartJavaSpark \
       -Prepourl=https://repository.apache.org/content/repositories/orgapachebeam-${KEY} \
       -Pver=${RELEASE_VERSION}
       ```
    diff --git a/website/www/site/content/en/documentation/sdks/java/testing/nexmark.md b/website/www/site/content/en/documentation/sdks/java/testing/nexmark.md
    index da5378034d8e..74ca4f2caaaa 100644
    --- a/website/www/site/content/en/documentation/sdks/java/testing/nexmark.md
    +++ b/website/www/site/content/en/documentation/sdks/java/testing/nexmark.md
    @@ -494,7 +494,7 @@ configure logging.
     Batch Mode:
     
         ./gradlew :sdks:java:testing:nexmark:run \
    -        -Pnexmark.runner=":runners:spark:2" \
    +        -Pnexmark.runner=":runners:spark:3" \
             -Pnexmark.args="
                 --runner=SparkRunner
                 --suite=SMOKE
    @@ -506,7 +506,7 @@ Batch Mode:
     Streaming Mode:
     
         ./gradlew :sdks:java:testing:nexmark:run \
    -        -Pnexmark.runner=":runners:spark:2" \
    +        -Pnexmark.runner=":runners:spark:3" \
             -Pnexmark.args="
                 --runner=SparkRunner
                 --suite=SMOKE
    
    From fc3e7af08664756e4a4cf3f8bd457349555635f5 Mon Sep 17 00:00:00 2001
    From: Ahmed Abualsaud <65791736+ahmedabu98@users.noreply.github.com>
    Date: Mon, 24 Oct 2022 11:31:29 -0400
    Subject: [PATCH 036/115] Ignoring BigQuery partitions with empty files
     (#23710)
    
    * ignoring partitions that have empty files
    
    * fix test
    
    * style
    ---
     .../apache_beam/io/gcp/bigquery_file_loads.py   |  7 +++++++
     .../io/gcp/bigquery_file_loads_test.py          | 17 +++++++++++++++++
     2 files changed, 24 insertions(+)
    
    diff --git a/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py b/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
    index 8b899a343d35..5209552dc1e2 100644
    --- a/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
    +++ b/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
    @@ -761,6 +761,13 @@ def process(self, element):
         files = element[1]
         partitions = []
     
    +    if not files:
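     +      # A destination keyed to an empty file list would otherwise produce a
     +      # load job with no source URIs, so skip it here.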
    +      _LOGGER.warning(
    +          'Ignoring a BigQuery batch load partition to %s '
    +          'that contains no source URIs.',
    +          destination)
    +      return
    +
         latest_partition = PartitionFiles.Partition(
             self.max_partition_size, self.max_files_per_partition)
     
    diff --git a/sdks/python/apache_beam/io/gcp/bigquery_file_loads_test.py b/sdks/python/apache_beam/io/gcp/bigquery_file_loads_test.py
    index 724032abfa7e..0c0e136eae4b 100644
    --- a/sdks/python/apache_beam/io/gcp/bigquery_file_loads_test.py
    +++ b/sdks/python/apache_beam/io/gcp/bigquery_file_loads_test.py
    @@ -400,6 +400,23 @@ def test_partition_files_dofn_size_split(self):
     
     
     class TestBigQueryFileLoads(_TestCaseWithTempDirCleanUp):
    +  def test_trigger_load_jobs_with_empty_files(self):
    +    destination = "project:dataset.table"
    +    empty_files = []
    +    load_job_prefix = "test_prefix"
    +
    +    with beam.Pipeline() as p:
    +      partitions = (
    +          p
    +          | beam.Create([(destination, empty_files)])
    +          | beam.ParDo(bqfl.PartitionFiles(1000, 10)).with_outputs(
    +              bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG,
    +              bqfl.PartitionFiles.SINGLE_PARTITION_TAG))
    +
    +      _ = (
    +          partitions[bqfl.PartitionFiles.SINGLE_PARTITION_TAG]
    +          | beam.ParDo(bqfl.TriggerLoadJobs(), load_job_prefix))
    +
       def test_records_traverse_transform_with_mocks(self):
         destination = 'project1:dataset1.table1'
     
    
    From de8f4ba446d7d1b275e3731d71404615bc459665 Mon Sep 17 00:00:00 2001
    From: Shubham Krishna 
    Date: Mon, 24 Oct 2022 19:40:48 +0200
    Subject: [PATCH 037/115] Benchmarking RunInference Example (#23554)
    MIME-Version: 1.0
    Content-Type: text/plain; charset=UTF-8
    Content-Transfer-Encoding: 8bit
    
    * Add code for runinference pipeline demonstrating different metrics
    
    * Update Documentation
    
    * Complete documentation and improve docstrings
    
    * Add RunInference Metrics snapshot
    
    * Fix formatting and pylinting issue
    
    * Fix pylinting and code bug
    
    * Remove raise statement and fix formatting
    
    * Fix importing package order
    
    * Add newlines to fix linting and formatting errors
    
    * Improve documentation and docstrings
    
    * Add newline and fix packages import order
    
    * Change argument name in transformations.py
    
    * Remove print statement and unset PROJECT_ID
    
    * Add image to repo
    
    * Update image
    
    * Fix merging typo
    
    * Fix Image rendering in documentation
    
    Co-authored-by: Shubham Krishna <“shubham.krishna@ml6.eu”>
    ---
     .../runinference_metrics/__init__.py          |   16 +
     .../inference/runinference_metrics/config.py  |   30 +
     .../inference/runinference_metrics/main.py    |  127 +
     .../runinference_metrics/pipeline/__init__.py |   16 +
     .../runinference_metrics/pipeline/options.py  |   74 +
     .../pipeline/transformations.py               |   94 +
     .../inference/runinference_metrics/setup.py   |   43 +
     .../documentation/ml/runinference-metrics.md  |  102 +
     .../section-menu/en/documentation.html        |    1 +
     .../images/runinference_metrics_snapshot.svg  | 4751 +++++++++++++++++
     10 files changed, 5254 insertions(+)
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/__init__.py
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/config.py
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/main.py
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/__init__.py
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/options.py
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/transformations.py
     create mode 100644 sdks/python/apache_beam/examples/inference/runinference_metrics/setup.py
     create mode 100644 website/www/site/content/en/documentation/ml/runinference-metrics.md
     create mode 100644 website/www/site/static/images/runinference_metrics_snapshot.svg
    
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/__init__.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/__init__.py
    new file mode 100644
    index 000000000000..cce3acad34a4
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/__init__.py
    @@ -0,0 +1,16 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/config.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/config.py
    new file mode 100644
    index 000000000000..61b9a21bea4a
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/config.py
    @@ -0,0 +1,30 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    +
    +"""The file defines global variables."""
    +
    +PROJECT_ID = ""
    +REGION = "us-central1"
    +JOB_NAME = "benchmarking-runinference"
    +NUM_WORKERS = 1
    +TOKENIZER_NAME = "distilbert-base-uncased-finetuned-sst-2-english"
    +MODEL_STATE_DICT_PATH = (
    +    f"gs://{PROJECT_ID}-ml-examples/{TOKENIZER_NAME}/pytorch_model.bin")
    +MODEL_CONFIG_PATH = TOKENIZER_NAME
    +IMG_NAME = "kfp-components-preprocessing/pytorch-gpu"
    +TAG = "latest"
    +DOCKER_IMG = f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{IMG_NAME}:{TAG}"
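     +# resolves to e.g. "us-central1-docker.pkg.dev/<your-project>/kfp-components-preprocessing/pytorch-gpu:latest" (hypothetical PROJECT_ID)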
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/main.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/main.py
    new file mode 100644
    index 000000000000..7feeda4ea8e0
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/main.py
    @@ -0,0 +1,127 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    +
    +"""This file contains the pipeline for loading a ML model, and exploring
    +the different RunInference metrics."""
    +import argparse
    +import logging
    +import sys
    +
    +import apache_beam as beam
    +import config as cfg
    +from apache_beam.ml.inference import RunInference
    +from apache_beam.ml.inference.base import KeyedModelHandler
    +from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerKeyedTensor
    +from pipeline.options import get_pipeline_options
    +from pipeline.transformations import CustomPytorchModelHandlerKeyedTensor
    +from pipeline.transformations import HuggingFaceStripBatchingWrapper
    +from pipeline.transformations import PostProcessor
    +from pipeline.transformations import Tokenize
    +from transformers import DistilBertConfig
    +
    +
    +def parse_arguments(argv):
    +  """
    +    Parses the arguments passed to the command line and
    +    returns them as an object
    +    Args:
    +      argv: The arguments passed to the command line.
    +    Returns:
     +      The parsed command-line arguments.
    +    """
    +  parser = argparse.ArgumentParser(description="benchmark-runinference")
    +
    +  parser.add_argument(
    +      "-m",
    +      "--mode",
    +      help="Mode to run pipeline in.",
    +      choices=["local", "cloud"],
    +      default="local",
    +  )
    +  parser.add_argument(
    +      "-p",
    +      "--project",
    +      help="GCP project to run pipeline on.",
    +      default=cfg.PROJECT_ID,
    +  )
    +  parser.add_argument(
    +      "-d",
    +      "--device",
    +      help="Device to run the dataflow job on",
    +      choices=["CPU", "GPU"],
    +      default="CPU",
    +  )
    +
    +  args, _ = parser.parse_known_args(args=argv)
    +  return args
    +
    +
    +def run():
    +  """
    +    Runs the pipeline that loads a transformer based text classification model
    +    and does inference on a list of sentences.
     +    At the end of the pipeline, different metrics like latency,
     +    throughput and others are printed.
    +    """
    +  args = parse_arguments(sys.argv)
    +
    +  inputs = [
    +      "This is the worst food I have ever eaten",
    +      "In my soul and in my heart, I’m convinced I’m wrong!",
    +      "Be with me always—take any form—drive me mad!"\
    +      "only do not leave me in this abyss, where I cannot find you!",
    +      "Do I want to live? Would you like to live with your soul in the grave?",
    +      "Honest people don’t hide their deeds.",
    +      "Nelly, I am Heathcliff!  He’s always,"\
    +      "always in my mind: not as a pleasure,"\
    +      "any more than I am always a pleasure to myself, but as my own being.",
    +  ] * 1000
    +
    +  pipeline_options = get_pipeline_options(
    +      job_name=cfg.JOB_NAME,
    +      num_workers=cfg.NUM_WORKERS,
    +      project=args.project,
    +      mode=args.mode,
    +      device=args.device,
    +  )
    +  model_handler_class = (
    +      PytorchModelHandlerKeyedTensor
    +      if args.device == "GPU" else CustomPytorchModelHandlerKeyedTensor)
    +  device = "cuda:0" if args.device == "GPU" else args.device
    +  model_handler = model_handler_class(
    +      state_dict_path=cfg.MODEL_STATE_DICT_PATH,
    +      model_class=HuggingFaceStripBatchingWrapper,
    +      model_params={
    +          "config": DistilBertConfig.from_pretrained(cfg.MODEL_CONFIG_PATH)
    +      },
    +      device=device,
    +  )
    +
    +  with beam.Pipeline(options=pipeline_options) as pipeline:
    +    _ = (
    +        pipeline
    +        | "Create inputs" >> beam.Create(inputs)
    +        | "Tokenize" >> beam.ParDo(Tokenize(cfg.TOKENIZER_NAME))
    +        | "Inference" >>
    +        RunInference(model_handler=KeyedModelHandler(model_handler))
    +        | "Decode Predictions" >> beam.ParDo(PostProcessor()))
    +  metrics = pipeline.result.metrics().query(beam.metrics.MetricsFilter())
    +  logging.info(metrics)
    +
    +
    +if __name__ == "__main__":
    +  run()
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/__init__.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/__init__.py
    new file mode 100644
    index 000000000000..cce3acad34a4
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/__init__.py
    @@ -0,0 +1,16 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/options.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/options.py
    new file mode 100644
    index 000000000000..b32200ed7331
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/options.py
    @@ -0,0 +1,74 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    +
    +"""This file contains the pipeline options to configure
    +the Dataflow pipeline."""
    +
    +from datetime import datetime
    +from typing import Any
    +
    +import config as cfg
    +from apache_beam.options.pipeline_options import PipelineOptions
    +
    +
    +def get_pipeline_options(
    +    project: str,
    +    job_name: str,
    +    mode: str,
    +    device: str,
    +    num_workers: int = cfg.NUM_WORKERS,
    +    **kwargs: Any,
    +) -> PipelineOptions:
    +  """Function to retrieve the pipeline options.
    +    Args:
    +        project: GCP project to run on
    +        mode: Indicator to run local, cloud or template
    +        num_workers: Number of Workers for running the job parallely
    +    Returns:
    +        Dataflow pipeline options
    +    """
    +  job_name = f'{job_name}-{datetime.now().strftime("%Y%m%d%H%M%S")}'
    +
    +  staging_bucket = f"gs://{cfg.PROJECT_ID}-ml-examples"
    +
    +  # For a list of available options, check:
    +  # https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options
    +  dataflow_options = {
    +      "runner": "DirectRunner" if mode == "local" else "DataflowRunner",
    +      "job_name": job_name,
    +      "project": project,
    +      "region": cfg.REGION,
    +      "staging_location": f"{staging_bucket}/dflow-staging",
    +      "temp_location": f"{staging_bucket}/dflow-temp",
    +      "setup_file": "./setup.py",
    +  }
    +  flags = []
    +  if device == "GPU":
    +    flags = [
    +        "--experiment=worker_accelerator=type:nvidia-tesla-p4;count:1;"\
    +          "install-nvidia-driver",
    +        "--experiment=use_runner_v2",
    +    ]
    +    dataflow_options.update({
    +        "sdk_container_image": cfg.DOCKER_IMG,
    +        "machine_type": "n1-standard-4",
    +    })
    +
    +  # Optional parameters
    +  if num_workers:
    +    dataflow_options.update({"num_workers": num_workers})
    +  return PipelineOptions(flags=flags, **dataflow_options)
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/transformations.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/transformations.py
    new file mode 100644
    index 000000000000..e7f6f9d44689
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/pipeline/transformations.py
    @@ -0,0 +1,94 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    +
    +"""This file contains the transformations and utility functions for
    +the pipeline."""
    +import apache_beam as beam
    +import torch
    +from apache_beam.io.filesystems import FileSystems
    +from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerKeyedTensor
    +from transformers import DistilBertForSequenceClassification
    +from transformers import DistilBertTokenizer
    +
    +
    +class CustomPytorchModelHandlerKeyedTensor(PytorchModelHandlerKeyedTensor):
    +  """Wrapper around PytorchModelHandlerKeyedTensor to load a model on CPU."""
    +  def load_model(self) -> torch.nn.Module:
    +    """Loads and initializes a Pytorch model for processing."""
    +    model = self._model_class(**self._model_params)
    +    model.to(self._device)
    +    file = FileSystems.open(self._state_dict_path, "rb")
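     +    # map_location remaps tensors that were saved from a GPU onto self._device,
     +    # so GPU-trained weights can be restored on CPU-only workers.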
    +    model.load_state_dict(torch.load(file, map_location=self._device))
    +    model.eval()
    +    return model
    +
    +
    +# Can be removed once https://github.com/apache/beam/issues/21863 is fixed
    +class HuggingFaceStripBatchingWrapper(DistilBertForSequenceClassification):
    +  """Wrapper around HuggingFace model because RunInference requires a batch
    +    as a list of dicts instead of a dict of lists. Another workaround
    +    can be found here where they disable batching instead.
    +    https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/inference/pytorch_language_modeling.py"""
    +  def forward(self, **kwargs):
    +    output = super().forward(**kwargs)
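     +    # `output` behaves like a dict of batched tensors, e.g. {"logits": [batch, 2]};
     +    # unzip it into one dict per element, e.g. [{"logits": [2]}, ...].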
    +    return [dict(zip(output, v)) for v in zip(*output.values())]
    +
    +
    +class Tokenize(beam.DoFn):
    +  """A DoFn for tokenizing texts"""
    +  def __init__(self, model_name: str):
    +    """Initialises a tokenizer based on the model_name"""
    +    self._model_name = model_name
    +
    +  def setup(self):
    +    """Loads the tokenizer"""
    +    self._tokenizer = DistilBertTokenizer.from_pretrained(self._model_name)
    +
    +  def process(self, text_input: str):
    +    """Prepocesses the text using the tokenizer"""
    +    # We need to pad the tokens tensors to max length to make sure
    +    # that all the tensors are of the same length and hence
    +    # stack-able by the RunInference API, normally you would batch first
    +    # and tokenize the batch after and pad each tensor
    +    # the the max length in the batch.
    +    tokens = self._tokenizer(
    +        text_input, return_tensors="pt", padding="max_length", max_length=512)
     +    # squeeze because tokenization adds an extra dimension, which is empty
     +    # in this case because we're tokenizing one element at a time.
    +    tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
    +    return [(text_input, tokens)]
    +
    +
    +class PostProcessor(beam.DoFn):
    +  """Postprocess the RunInference output"""
    +  def process(self, element):
    +    """
    +        Takes the input text and the prediction result, and returns a dictionary
    +        with the input text and the softmax probabilities
    +
    +        Args:
    +          element: The tuple of input text and the prediction result
    +
    +        Returns:
    +          A list of dictionaries, each containing the input text
    +          and the softmax output.
    +        """
    +    text_input, prediction_result = element
    +    softmax = (
    +        torch.nn.Softmax(dim=-1)(
    +            prediction_result.inference["logits"]).detach().numpy())
    +    return [{"input": text_input, "softmax": softmax}]
    diff --git a/sdks/python/apache_beam/examples/inference/runinference_metrics/setup.py b/sdks/python/apache_beam/examples/inference/runinference_metrics/setup.py
    new file mode 100644
    index 000000000000..d6fb9742ac4c
    --- /dev/null
    +++ b/sdks/python/apache_beam/examples/inference/runinference_metrics/setup.py
    @@ -0,0 +1,43 @@
    +#
    +# Licensed to the Apache Software Foundation (ASF) under one or more
    +# contributor license agreements.  See the NOTICE file distributed with
    +# this work for additional information regarding copyright ownership.
    +# The ASF licenses this file to You under the Apache License, Version 2.0
    +# (the "License"); you may not use this file except in compliance with
    +# the License.  You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +#
    +
    +"""Setup.py module for the workflow's worker utilities.
    +
    +All the workflow related code is gathered in a package that will be built as a
    +source distribution, staged in the staging area for the workflow being run and
    +then installed in the workers when they start running.
    +
    +This behavior is triggered by specifying the --setup_file command line option
    +when running the workflow for remote execution.
    +"""
    +
    +import setuptools
    +from setuptools import find_packages
    +
    +REQUIREMENTS = [
    +    "apache-beam[gcp]==2.41.0", "transformers==4.21.0", "torch==1.12.0"
    +]
    +
    +setuptools.setup(
    +    name="write-to-pubsub-pipeline",
    +    version="1.1.1",
    +    install_requires=REQUIREMENTS,
    +    packages=find_packages(),
    +    author="Apache Software Foundation",
    +    author_email="dev@beam.apache.org",
    +    py_modules=["config"],
    +)
    diff --git a/website/www/site/content/en/documentation/ml/runinference-metrics.md b/website/www/site/content/en/documentation/ml/runinference-metrics.md
    new file mode 100644
    index 000000000000..e58cf20c0d42
    --- /dev/null
    +++ b/website/www/site/content/en/documentation/ml/runinference-metrics.md
    @@ -0,0 +1,102 @@
    +---
    +title: "RunInference Metrics"
    +---
    +
    +
    +# RunInference Metrics Example
    +
     +The main purpose of the example is to demonstrate and explain the different metrics that are available when using the [RunInference](https://beam.apache.org/documentation/transforms/python/elementwise/runinference/) transform to perform inference using a machine learning model. We use a pipeline that reads a list of sentences, tokenizes the text, and uses the Transformer-based model `distilbert-base-uncased-finetuned-sst-2-english` to classify the texts into two different classes using `RunInference`.
    +
    +We showcase different RunInference metrics when the pipeline is executed using the Dataflow Runner on CPU and GPU. The full example code can be found [here](https://github.com/apache/beam/tree/master/sdks/python/apache_beam/examples/inference/runinference_metrics/).
    +
    +
     +The file structure for the entire pipeline is:
    +
    +    runinference_metrics/
    +    ├── pipeline/
    +    │   ├── __init__.py
    +    │   ├── options.py
    +    │   └── transformations.py
    +    ├── __init__.py
    +    ├── config.py
    +    ├── main.py
    +    └── setup.py
    +
     +`pipeline/transformations.py` contains the code for the `beam.DoFn` and additional functions that are used in the pipeline.
     +
     +`pipeline/options.py` contains the pipeline options to configure the Dataflow pipeline.
     +
     +`config.py` defines variables, such as the GCP `PROJECT_ID` and `NUM_WORKERS`, that are used multiple times.
     +
     +`setup.py` defines the packages/requirements for the pipeline to run.
     +
     +`main.py` contains the pipeline code and some additional functions used for running the pipeline.
    +
    +
    +### How to Run the Pipeline
     +First, make sure you have installed the required packages. You need access to a Google Cloud project, and you must correctly configure the GCP variables, such as `PROJECT_ID`, `REGION`, and others, in `config.py`. To use GPUs, follow the setup instructions [here](https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/dataflow/gpu-examples/pytorch-minimal).
    +
    +
    +1. Dataflow with CPU: `python main.py --mode cloud --device CPU`
    +2. Dataflow with GPU: `python main.py --mode cloud --device GPU`
    +
     +The pipeline can be broken down into a few simple steps:
    +1. Create a list of texts to use as an input using `beam.Create`
    +2. Tokenize the text
    +3. Use RunInference to do inference
    +4. Postprocess the output of RunInference
    +
    +{{< highlight >}}
    +  with beam.Pipeline(options=pipeline_options) as pipeline:
    +    _ = (
    +        pipeline
    +        | "Create inputs" >> beam.Create(inputs)
    +        | "Tokenize" >> beam.ParDo(Tokenize(cfg.TOKENIZER_NAME))
    +        | "Inference" >>
    +        RunInference(model_handler=KeyedModelHandler(model_handler))
    +        | "Decode Predictions" >> beam.ParDo(PostProcessor()))
    +{{< /highlight >}}
    +
    +
    +## RunInference Metrics
    +
     +As mentioned above, we benchmarked the performance of RunInference using Dataflow on both CPU and GPU. These metrics can be seen in the GCP UI, and they can also be printed using:
    +
    +{{< highlight >}}
    +metrics = pipeline.result.metrics().query(beam.metrics.MetricsFilter())
    +{{< /highlight >}}
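     +
     +For instance, a single named counter can be pulled out of the results (a hypothetical snippet that follows the same `MetricsFilter` API):
     +
     +{{< highlight >}}
     +filter = beam.metrics.MetricsFilter().with_name("num_inferences")
     +num_inferences = pipeline.result.metrics().query(filter)["counters"]
     +{{< /highlight >}}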
    +
    +
    +A snapshot of different metrics from GCP UI when using Dataflow on GPU:
    +
    +  ![RunInference GPU metrics rendered on Dataflow](/images/runinference_metrics_snapshot.svg)
    +
    +Some metrics commonly used for benchmarking are:
    +
    +* `num_inferences`: represents the total number of elements passed to `run_inference()`.
    +
    +* `inference_batch_latency_micro_secs_MEAN`: represents the average time taken to perform the inference across all batches of examples, measured in microseconds.
    +
     +* `inference_request_batch_size_COUNT`: represents the total number of samples across all batches of examples (created from `beam.BatchElements`) to be passed to `run_inference()`.
     +
     +* `inference_request_batch_byte_size_MEAN`: represents the average size of all elements for all samples in all batches of examples (created from `beam.BatchElements`) to be passed to `run_inference()`. This is measured in bytes.
    +
    +* `model_byte_size_MEAN`: represents the average memory consumed to load and initialize the model. This is measured in bytes.
    +
    +* `load_model_latency_milli_secs_MEAN`: represents the average time taken to load and initialize the model. This is measured in milliseconds.
    +
     +One can derive other relevant metrics, for example:
    +* `Total time taken for inference` = `num_inferences x inference_batch_latency_micro_secs_MEAN`
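     +
     +As a purely illustrative calculation with made-up numbers: a run reporting `num_inferences = 6000` and `inference_batch_latency_micro_secs_MEAN = 50000` would have spent roughly `6000 x 50000` microseconds, i.e. about 300 seconds, on inference in total.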
    +
    diff --git a/website/www/site/layouts/partials/section-menu/en/documentation.html b/website/www/site/layouts/partials/section-menu/en/documentation.html
    index 576b21e38cbc..345ab8bc8195 100644
    --- a/website/www/site/layouts/partials/section-menu/en/documentation.html
    +++ b/website/www/site/layouts/partials/section-menu/en/documentation.html
    @@ -217,6 +217,7 @@
         
  • Data processing
  • Multi-model pipelines
  • Online Clustering
  • +
  • RunInference Metrics
  • Anomaly Detection
  • diff --git a/website/www/site/static/images/runinference_metrics_snapshot.svg b/website/www/site/static/images/runinference_metrics_snapshot.svg new file mode 100644 index 000000000000..a1b41b2c2084 --- /dev/null +++ b/website/www/site/static/images/runinference_metrics_snapshot.svg @@ -0,0 +1,4751 @@ + + + + + From 40b5e54e08e1a5cbaf38e3e2a51278a1b976c2df Mon Sep 17 00:00:00 2001 From: kileys Date: Mon, 24 Oct 2022 18:12:38 +0000 Subject: [PATCH 038/115] Increate timeout for test pipelines --- .../java/org/apache/beam/sdk/testing/TestPipelineOptions.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/TestPipelineOptions.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/TestPipelineOptions.java index 3327ae8fc747..6ff5ded5318d 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/TestPipelineOptions.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/TestPipelineOptions.java @@ -48,7 +48,7 @@ public interface TestPipelineOptions extends PipelineOptions { void setOnSuccessMatcher(SerializableMatcher value); - @Default.Long(10 * 60) + @Default.Long(15 * 60) @Nullable Long getTestTimeoutSeconds(); From 226bc97d298b1922323c7af3836c4ccc40ede22e Mon Sep 17 00:00:00 2001 From: Brian Hulette Date: Mon, 24 Oct 2022 14:45:55 -0700 Subject: [PATCH 039/115] Bump Dataflow python containers to 20221021 (#23807) --- sdks/python/apache_beam/runners/dataflow/internal/names.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdks/python/apache_beam/runners/dataflow/internal/names.py b/sdks/python/apache_beam/runners/dataflow/internal/names.py index 4ec94616d6ab..aa6de4f7085e 100644 --- a/sdks/python/apache_beam/runners/dataflow/internal/names.py +++ b/sdks/python/apache_beam/runners/dataflow/internal/names.py @@ -36,10 +36,10 @@ # Update this version to the next version whenever there is a change that will # require changes to legacy Dataflow worker execution environment. -BEAM_CONTAINER_VERSION = 'beam-master-20221018' +BEAM_CONTAINER_VERSION = 'beam-master-20221021' # Update this version to the next version whenever there is a change that # requires changes to SDK harness container or SDK harness launcher. -BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20221018' +BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20221021' DATAFLOW_CONTAINER_IMAGE_REPOSITORY = 'gcr.io/cloud-dataflow/v1beta3' From 75fa82cc2e68047246e3b9f9843d4f2a6547455b Mon Sep 17 00:00:00 2001 From: Lukasz Cwik Date: Mon, 24 Oct 2022 16:58:08 -0700 Subject: [PATCH 040/115] Allow MoreFutures.allAsList/allAsListWithExceptions to have the passed in list to be mutated (#23811) This resolves ConcurrentModificationExceptions seen within WriteFiles and other places that use MoreFutures.allAsList* methods. 
Fixes #23809 --- .../org/apache/beam/sdk/util/MoreFutures.java | 25 +++---- .../apache/beam/sdk/util/MoreFuturesTest.java | 74 +++++++++++++++++++ 2 files changed, 85 insertions(+), 14 deletions(-) diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/MoreFutures.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/MoreFutures.java index 6f053752d3f6..57074a7d6db4 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/MoreFutures.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/MoreFutures.java @@ -19,6 +19,7 @@ import com.google.auto.value.AutoValue; import edu.umd.cs.findbugs.annotations.SuppressWarnings; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -161,17 +162,13 @@ public static CompletionStage runAsync(ThrowingRunnable runnable) { /** Like {@link CompletableFuture#allOf} but returning the result of constituent futures. */ public static CompletionStage> allAsList( Collection> futures) { - // CompletableFuture.allOf completes exceptionally if any of the futures do. // We have to gather the results separately. - CompletionStage blockAndDiscard = - CompletableFuture.allOf(futuresToCompletableFutures(futures)); + CompletableFuture[] f = futuresToCompletableFutures(futures); + CompletionStage blockAndDiscard = CompletableFuture.allOf(f); return blockAndDiscard.thenApply( - nothing -> - futures.stream() - .map(future -> future.toCompletableFuture().join()) - .collect(Collectors.toList())); + nothing -> Arrays.stream(f).map(CompletableFuture::join).collect(Collectors.toList())); } /** @@ -207,25 +204,25 @@ public static ExceptionOrResult result(T result) { } } - /** Like {@link #allAsList} but return a list . */ + /** + * Like {@link #allAsList} but return a list of {@link ExceptionOrResult} of constituent futures. + */ public static CompletionStage>> allAsListWithExceptions( Collection> futures) { - // CompletableFuture.allOf completes exceptionally if any of the futures do. // We have to gather the results separately. - CompletionStage blockAndDiscard = - CompletableFuture.allOf(futuresToCompletableFutures(futures)) - .whenComplete((ignoredValues, arbitraryException) -> {}); + CompletableFuture[] f = futuresToCompletableFutures(futures); + CompletionStage blockAndDiscard = CompletableFuture.allOf(f); return blockAndDiscard.thenApply( nothing -> - futures.stream() + Arrays.stream(f) .map( future -> { // The limited scope of the exceptions wrapped allows CancellationException // to still be thrown. 
try { - return ExceptionOrResult.result(future.toCompletableFuture().join()); + return ExceptionOrResult.result(future.join()); } catch (CompletionException exc) { return ExceptionOrResult.exception(exc); } diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/util/MoreFuturesTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/util/MoreFuturesTest.java index 4b6790d22c30..b8a107935016 100644 --- a/sdks/java/core/src/test/java/org/apache/beam/sdk/util/MoreFuturesTest.java +++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/util/MoreFuturesTest.java @@ -20,10 +20,16 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isA; +import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; import java.util.concurrent.CompletionStage; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.beam.sdk.util.MoreFutures.ExceptionOrResult; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -84,4 +90,72 @@ public void runAsyncFailure() throws Exception { thrown.expectMessage(testMessage); MoreFutures.get(sideEffectFuture); } + + @Test + public void testAllAsListRespectsOriginalList() throws Exception { + CountDownLatch waitTillThreadRunning = new CountDownLatch(1); + CountDownLatch waitTillClearHasHappened = new CountDownLatch(1); + List> stages = new ArrayList<>(); + stages.add(MoreFutures.runAsync(waitTillThreadRunning::countDown)); + stages.add(MoreFutures.runAsync(waitTillClearHasHappened::await)); + + CompletionStage> results = MoreFutures.allAsList(stages); + waitTillThreadRunning.await(); + stages.clear(); + waitTillClearHasHappened.countDown(); + assertEquals(MoreFutures.get(results), Arrays.asList(null, null)); + } + + @Test + public void testAllAsListNoExceptionDueToMutation() throws Exception { + // This loop runs many times trying to exercise a race condition that existed where mutation + // of the passed in completion stages lead to various exceptions (such as a + // ConcurrentModificationException). 
See https://github.com/apache/beam/issues/23809
+    for (int i = 0; i < 10000; ++i) {
+      CountDownLatch waitTillThreadRunning = new CountDownLatch(1);
+      List<CompletionStage<Void>> stages = new ArrayList<>();
+      stages.add(MoreFutures.runAsync(waitTillThreadRunning::countDown));
+
+      CompletionStage<List<Void>> results = MoreFutures.allAsList(stages);
+      waitTillThreadRunning.await();
+      stages.clear();
+      MoreFutures.get(results);
+    }
+  }
+
+  @Test
+  public void testAllAsListWithExceptionsRespectsOriginalList() throws Exception {
+    CountDownLatch waitTillThreadRunning = new CountDownLatch(1);
+    CountDownLatch waitTillClearHasHappened = new CountDownLatch(1);
+    List<CompletionStage<Void>> stages = new ArrayList<>();
+    stages.add(MoreFutures.runAsync(waitTillThreadRunning::countDown));
+    stages.add(MoreFutures.runAsync(waitTillClearHasHappened::await));
+
+    CompletionStage<List<ExceptionOrResult<Void>>> results =
+        MoreFutures.allAsListWithExceptions(stages);
+    waitTillThreadRunning.await();
+    stages.clear();
+    waitTillClearHasHappened.countDown();
+    assertEquals(
+        MoreFutures.get(results),
+        Arrays.asList(ExceptionOrResult.result(null), ExceptionOrResult.result(null)));
+  }
+
+  @Test
+  public void testAllAsListWithExceptionsNoExceptionDueToMutation() throws Exception {
+    // This loop runs many times trying to exercise a race condition that existed where mutation
+    // of the passed-in completion stages led to various exceptions (such as a
+    // ConcurrentModificationException). See https://github.com/apache/beam/issues/23809
+    for (int i = 0; i < 10000; ++i) {
+      CountDownLatch waitTillThreadRunning = new CountDownLatch(1);
+      List<CompletionStage<Void>> stages = new ArrayList<>();
+      stages.add(MoreFutures.runAsync(waitTillThreadRunning::countDown));
+
+      CompletionStage<List<ExceptionOrResult<Void>>> results =
+          MoreFutures.allAsListWithExceptions(stages);
+      waitTillThreadRunning.await();
+      stages.clear();
+      MoreFutures.get(results);
+    }
+  }
 }

From 206df4d0c004a3fc2f89290b711760d8f8dac0b0 Mon Sep 17 00:00:00 2001
From: Oleh Borysevych
Date: Tue, 25 Oct 2022 19:29:34 +0300
Subject: [PATCH 041/115] granting ruslan shamunov triage rights (#23806)

---
 .asf.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.asf.yaml b/.asf.yaml
index 721b9f2d3dd7..8c067e7e4ee9 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -38,6 +38,7 @@ github:
   collaborators:
     - pcoet
     - olehborysevych
+    - rshamunov
   enabled_merge_buttons:
     squash: true

From 220902c6ffd4f2e59d61b0b1cb6b25ce4191383a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 25 Oct 2022 12:50:03 -0400
Subject: [PATCH 042/115] Bump google.golang.org/api from 0.99.0 to 0.100.0 in /sdks (#23718)

Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.99.0 to 0.100.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.99.0...v0.100.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- sdks/go.mod | 10 +++++----- sdks/go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/sdks/go.mod b/sdks/go.mod index 438ffe1b5b6f..8542b91c413b 100644 --- a/sdks/go.mod +++ b/sdks/go.mod @@ -41,13 +41,13 @@ require ( github.com/testcontainers/testcontainers-go v0.14.0 github.com/xitongsys/parquet-go v1.6.2 github.com/xitongsys/parquet-go-source v0.0.0-20220315005136-aec0fe3e777c - golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458 - golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1 + golang.org/x/net v0.0.0-20221014081412-f15817d10f9b + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 golang.org/x/text v0.4.0 - google.golang.org/api v0.99.0 - google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e + google.golang.org/api v0.100.0 + google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a google.golang.org/grpc v1.50.1 google.golang.org/protobuf v1.28.1 gopkg.in/retry.v1 v1.0.3 @@ -74,7 +74,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/pprof v0.0.0-20220412212628-83db2b799d1f // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.5.1 // indirect + github.com/googleapis/gax-go/v2 v2.6.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/klauspost/compress v1.13.1 // indirect github.com/magiconair/properties v1.8.6 // indirect diff --git a/sdks/go.sum b/sdks/go.sum index a129517f4dfe..9e7270b639f7 100644 --- a/sdks/go.sum +++ b/sdks/go.sum @@ -587,8 +587,8 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -1176,8 +1176,8 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458 h1:MgJ6t2zo8v0tbmLCueaCbF1RM+TtB0rs3Lv8DGtOIpY= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1198,8 +1198,8 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1 h1:3VPzK7eqH25j7GYw5w6g/GzNRc0/fYtrxz27z1gD4W0= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1476,8 +1476,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.99.0 h1:tsBtOIklCE2OFxhmcYSVqGwSAN/Y897srxmcvAQnwK8= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0 h1:LGUYIrbW9pzYQQ8NWXlaIVkgnfubVBZbMFb9P8TK374= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1570,8 +1570,8 @@ google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e h1:halCgTFuLWDRD61piiNSxPsARANGD3Xl16hPrLgLiIg= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod 
h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From 76761db371c8e33136706872f83d89ea600022db Mon Sep 17 00:00:00 2001 From: Alex Merose Date: Tue, 25 Oct 2022 09:54:08 -0700 Subject: [PATCH 043/115] Initial DaskRunner for Beam (#22421) * WIP: Created a skeleton dask runner implementation. * WIP: Idea for a translation evaluator. * Added overrides and a visitor that translates operations. * Fixed a dataclass typo. * Expanded translations. * Core idea seems to be kinda working... * First iteration on DaskRunnerResult (keep track of pipeline state). * Added minimal set of DaskRunner options. * WIP: Alllmost got asserts to work! The current status is: - CoGroupByKey is broken due to how tags are used with GroupByKey - GroupByKey should output `[('0', None), ('1', 1)]`, however it actually outputs: [(None, ('1', 1)), (None, ('0', None))] - Once that is fixed, we may have test pipelines work on Dask. * With a great 1-liner from @pabloem, groupby is fixed! Now, all three initial tests pass. * Self-review: Cleaned up dask runner impl. * Self-review: Remove TODOs, delete commented out code, other cleanup. * First pass at linting rules. * WIP, include dask dependencies + test setup. * WIP: maybe better dask deps? * Skip dask tests depending on successful import. * Fixed setup.py (missing `,`). * Added an additional comma. * Moved skipping logic to be above dask import. * Fix lint issues with dask runner tests. * Adding destination for client address. * Changing to async produces a timeout error instead of stuck in infinite loop. * Close client during `wait_until_finish`; rm async. * Supporting side-inputs for ParDo. * Revert "Close client during `wait_until_finish`; rm async." This reverts commit 09365f628dc86832190efd03a308ba177755bf13. * Revert "Changing to async produces a timeout error instead of stuck in infinite loop." This reverts commit 676d75226c3f517083baef5dab4a52e5cde0f1a0. * Adding -dask tox targets onto the gradle build * wip - added print stmt. * wip - prove side inputs is set. * wip - prove side inputs is set in Pardo. * wip - rm asserts, add print * wip - adding named inputs... * Experiments: non-named side inputs + del `None` in named inputs. * None --> 'None' * No default side input. * Pass along args + kwargs. * Applied yapf to dask sources. * Dask sources passing pylint. * Added dask extra to docs gen tox env. * Applied yapf from tox. * Include dask in mypy checks. * Upgrading mypy support to python 3.8 since py37 support is deprecated in dask. * Manually installing an old version of dask before 3.7 support was dropped. * fix lint: line too long. * Fixed type errors with DaskRunnerResult. Disabled mypy type checking in dask. * Fix pytype errors (in transform_evaluator). * Ran isort. * Ran yapf again. * Fix imports (one per line) * isort -- alphabetical. * Added feature to CHANGES.md. * ran yapf via tox on linux machine * Change an import to pass CI. * Skip isort error; needed to get CI to pass. * Skip test logic may favor better with isort. * (Maybe) the last isort fix. * Tested pipeline options (added one fix). * Improve formatting of test. * Self-review: removing side inputs. In addition, adding a more helpful property to the base DaskBagOp (tranform). * add dask to coverage suite in tox. 
* Capture value error in assert. * Change timeout value to 600 seconds. * ignoring broken test * Update CHANGES.md * Using reflection to test the Dask client constructor. * Better method of inspecting the constructor parameters (thanks @TomAugspurger!). Co-authored-by: Pablo E Co-authored-by: Pablo --- CHANGES.md | 2 + .../apache_beam/runners/dask/__init__.py | 16 ++ .../apache_beam/runners/dask/dask_runner.py | 182 ++++++++++++++++++ .../runners/dask/dask_runner_test.py | 94 +++++++++ .../apache_beam/runners/dask/overrides.py | 145 ++++++++++++++ .../runners/dask/transform_evaluator.py | 103 ++++++++++ sdks/python/mypy.ini | 3 + sdks/python/setup.py | 6 +- sdks/python/test-suites/tox/common.gradle | 3 + sdks/python/tox.ini | 13 +- 10 files changed, 563 insertions(+), 4 deletions(-) create mode 100644 sdks/python/apache_beam/runners/dask/__init__.py create mode 100644 sdks/python/apache_beam/runners/dask/dask_runner.py create mode 100644 sdks/python/apache_beam/runners/dask/dask_runner_test.py create mode 100644 sdks/python/apache_beam/runners/dask/overrides.py create mode 100644 sdks/python/apache_beam/runners/dask/transform_evaluator.py diff --git a/CHANGES.md b/CHANGES.md index f1b8df5c5b25..520609504c80 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -64,6 +64,7 @@ ## Highlights * Python 3.10 support in Apache Beam ([#21458](https://github.com/apache/beam/issues/21458)). +* An initial implementation of a runner that allows us to run Beam pipelines on Dask. Try it out and give us feedback! (Python) ([#18962](https://github.com/apache/beam/issues/18962)). ## I/Os @@ -81,6 +82,7 @@ * X feature added (Java/Python) ([#X](https://github.com/apache/beam/issues/X)). * Dataframe wrapper added in Go SDK via Cross-Language (with automatic expansion service). (Go) ([#23384](https://github.com/apache/beam/issues/23384)). * Name all Java threads to aid in debugging ([#23049](https://github.com/apache/beam/issues/23049)). +* An initial implementation of a runner that allows us to run Beam pipelines on Dask. (Python) ([#18962](https://github.com/apache/beam/issues/18962)). ## Breaking Changes diff --git a/sdks/python/apache_beam/runners/dask/__init__.py b/sdks/python/apache_beam/runners/dask/__init__.py new file mode 100644 index 000000000000..cce3acad34a4 --- /dev/null +++ b/sdks/python/apache_beam/runners/dask/__init__.py @@ -0,0 +1,16 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/sdks/python/apache_beam/runners/dask/dask_runner.py b/sdks/python/apache_beam/runners/dask/dask_runner.py new file mode 100644 index 000000000000..109c4379b45d --- /dev/null +++ b/sdks/python/apache_beam/runners/dask/dask_runner.py @@ -0,0 +1,182 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""DaskRunner, executing remote jobs on Dask.distributed. + +The DaskRunner is a runner implementation that executes a graph of +transformations across processes and workers via Dask distributed's +scheduler. +""" +import argparse +import dataclasses +import typing as t + +from apache_beam import pvalue +from apache_beam.options.pipeline_options import PipelineOptions +from apache_beam.pipeline import AppliedPTransform +from apache_beam.pipeline import PipelineVisitor +from apache_beam.runners.dask.overrides import dask_overrides +from apache_beam.runners.dask.transform_evaluator import TRANSLATIONS +from apache_beam.runners.dask.transform_evaluator import NoOp +from apache_beam.runners.direct.direct_runner import BundleBasedDirectRunner +from apache_beam.runners.runner import PipelineResult +from apache_beam.runners.runner import PipelineState +from apache_beam.utils.interactive_utils import is_in_notebook + + +class DaskOptions(PipelineOptions): + @staticmethod + def _parse_timeout(candidate): + try: + return int(candidate) + except (TypeError, ValueError): + import dask + return dask.config.no_default + + @classmethod + def _add_argparse_args(cls, parser: argparse.ArgumentParser) -> None: + parser.add_argument( + '--dask_client_address', + dest='address', + type=str, + default=None, + help='Address of a dask Scheduler server. Will default to a ' + '`dask.LocalCluster()`.') + parser.add_argument( + '--dask_connection_timeout', + dest='timeout', + type=DaskOptions._parse_timeout, + help='Timeout duration for initial connection to the scheduler.') + parser.add_argument( + '--dask_scheduler_file', + dest='scheduler_file', + type=str, + default=None, + help='Path to a file with scheduler information if available.') + # TODO(alxr): Add options for security. 
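+        # (Each `dest` in this parser is deliberately chosen to match a keyword
+        # argument of dask.distributed.Client, so the parsed options can be
+        # forwarded to the client unchanged; run_pipeline() below does exactly
+        # that via:
+        #   dask_options = options.view_as(DaskOptions).get_all_options(
+        #       drop_default=True)
+        #   client = ddist.Client(**dask_options)
+        # and test_parser_destinations__agree_with_dask_client verifies the
+        # mapping.)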
+ parser.add_argument( + '--dask_client_name', + dest='name', + type=str, + default=None, + help='Gives the client a name that will be included in logs generated ' + 'on the scheduler for matters relating to this client.') + parser.add_argument( + '--dask_connection_limit', + dest='connection_limit', + type=int, + default=512, + help='The number of open comms to maintain at once in the connection ' + 'pool.') + + +@dataclasses.dataclass +class DaskRunnerResult(PipelineResult): + from dask import distributed + + client: distributed.Client + futures: t.Sequence[distributed.Future] + + def __post_init__(self): + super().__init__(PipelineState.RUNNING) + + def wait_until_finish(self, duration=None) -> str: + try: + if duration is not None: + # Convert milliseconds to seconds + duration /= 1000 + self.client.wait_for_workers(timeout=duration) + self.client.gather(self.futures, errors='raise') + self._state = PipelineState.DONE + except: # pylint: disable=broad-except + self._state = PipelineState.FAILED + raise + return self._state + + def cancel(self) -> str: + self._state = PipelineState.CANCELLING + self.client.cancel(self.futures) + self._state = PipelineState.CANCELLED + return self._state + + def metrics(self): + # TODO(alxr): Collect and return metrics... + raise NotImplementedError('collecting metrics will come later!') + + +class DaskRunner(BundleBasedDirectRunner): + """Executes a pipeline on a Dask distributed client.""" + @staticmethod + def to_dask_bag_visitor() -> PipelineVisitor: + from dask import bag as db + + @dataclasses.dataclass + class DaskBagVisitor(PipelineVisitor): + bags: t.Dict[AppliedPTransform, + db.Bag] = dataclasses.field(default_factory=dict) + + def visit_transform(self, transform_node: AppliedPTransform) -> None: + op_class = TRANSLATIONS.get(transform_node.transform.__class__, NoOp) + op = op_class(transform_node) + + inputs = list(transform_node.inputs) + if inputs: + bag_inputs = [] + for input_value in inputs: + if isinstance(input_value, pvalue.PBegin): + bag_inputs.append(None) + + prev_op = input_value.producer + if prev_op in self.bags: + bag_inputs.append(self.bags[prev_op]) + + if len(bag_inputs) == 1: + self.bags[transform_node] = op.apply(bag_inputs[0]) + else: + self.bags[transform_node] = op.apply(bag_inputs) + + else: + self.bags[transform_node] = op.apply(None) + + return DaskBagVisitor() + + @staticmethod + def is_fnapi_compatible(): + return False + + def run_pipeline(self, pipeline, options): + # TODO(alxr): Create interactive notebook support. + if is_in_notebook(): + raise NotImplementedError('interactive support will come later!') + + try: + import dask.distributed as ddist + except ImportError: + raise ImportError( + 'DaskRunner is not available. Please install apache_beam[dask].') + + dask_options = options.view_as(DaskOptions).get_all_options( + drop_default=True) + client = ddist.Client(**dask_options) + + pipeline.replace_all(dask_overrides()) + + dask_visitor = self.to_dask_bag_visitor() + pipeline.visit(dask_visitor) + + futures = client.compute(list(dask_visitor.bags.values())) + return DaskRunnerResult(client, futures) diff --git a/sdks/python/apache_beam/runners/dask/dask_runner_test.py b/sdks/python/apache_beam/runners/dask/dask_runner_test.py new file mode 100644 index 000000000000..d8b3e17d8a56 --- /dev/null +++ b/sdks/python/apache_beam/runners/dask/dask_runner_test.py @@ -0,0 +1,94 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import inspect +import unittest + +import apache_beam as beam +from apache_beam.options.pipeline_options import PipelineOptions +from apache_beam.testing import test_pipeline +from apache_beam.testing.util import assert_that +from apache_beam.testing.util import equal_to + +try: + from apache_beam.runners.dask.dask_runner import DaskOptions + from apache_beam.runners.dask.dask_runner import DaskRunner + import dask + import dask.distributed as ddist +except (ImportError, ModuleNotFoundError): + raise unittest.SkipTest('Dask must be installed to run tests.') + + +class DaskOptionsTest(unittest.TestCase): + def test_parses_connection_timeout__defaults_to_none(self): + default_options = PipelineOptions([]) + default_dask_options = default_options.view_as(DaskOptions) + self.assertEqual(None, default_dask_options.timeout) + + def test_parses_connection_timeout__parses_int(self): + conn_options = PipelineOptions('--dask_connection_timeout 12'.split()) + dask_conn_options = conn_options.view_as(DaskOptions) + self.assertEqual(12, dask_conn_options.timeout) + + def test_parses_connection_timeout__handles_bad_input(self): + err_options = PipelineOptions('--dask_connection_timeout foo'.split()) + dask_err_options = err_options.view_as(DaskOptions) + self.assertEqual(dask.config.no_default, dask_err_options.timeout) + + def test_parser_destinations__agree_with_dask_client(self): + options = PipelineOptions( + '--dask_client_address localhost:8080 --dask_connection_timeout 600 ' + '--dask_scheduler_file foobar.cfg --dask_client_name charlie ' + '--dask_connection_limit 1024'.split()) + dask_options = options.view_as(DaskOptions) + + # Get the argument names for the constructor. 
+ client_args = list(inspect.signature(ddist.Client).parameters) + + for opt_name in dask_options.get_all_options(drop_default=True).keys(): + with self.subTest(f'{opt_name} in dask.distributed.Client constructor'): + self.assertIn(opt_name, client_args) + + +class DaskRunnerRunPipelineTest(unittest.TestCase): + """Test class used to introspect the dask runner via a debugger.""" + def setUp(self) -> None: + self.pipeline = test_pipeline.TestPipeline(runner=DaskRunner()) + + def test_create(self): + with self.pipeline as p: + pcoll = p | beam.Create([1]) + assert_that(pcoll, equal_to([1])) + + def test_create_and_map(self): + def double(x): + return x * 2 + + with self.pipeline as p: + pcoll = p | beam.Create([1]) | beam.Map(double) + assert_that(pcoll, equal_to([2])) + + def test_create_map_and_groupby(self): + def double(x): + return x * 2, x + + with self.pipeline as p: + pcoll = p | beam.Create([1]) | beam.Map(double) | beam.GroupByKey() + assert_that(pcoll, equal_to([(2, [1])])) + + +if __name__ == '__main__': + unittest.main() diff --git a/sdks/python/apache_beam/runners/dask/overrides.py b/sdks/python/apache_beam/runners/dask/overrides.py new file mode 100644 index 000000000000..d07c7cd518af --- /dev/null +++ b/sdks/python/apache_beam/runners/dask/overrides.py @@ -0,0 +1,145 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import dataclasses +import typing as t + +import apache_beam as beam +from apache_beam import typehints +from apache_beam.io.iobase import SourceBase +from apache_beam.pipeline import AppliedPTransform +from apache_beam.pipeline import PTransformOverride +from apache_beam.runners.direct.direct_runner import _GroupAlsoByWindowDoFn +from apache_beam.transforms import ptransform +from apache_beam.transforms.window import GlobalWindows + +K = t.TypeVar("K") +V = t.TypeVar("V") + + +@dataclasses.dataclass +class _Create(beam.PTransform): + values: t.Tuple[t.Any] + + def expand(self, input_or_inputs): + return beam.pvalue.PCollection.from_(input_or_inputs) + + def get_windowing(self, inputs: t.Any) -> beam.Windowing: + return beam.Windowing(GlobalWindows()) + + +@typehints.with_input_types(K) +@typehints.with_output_types(K) +class _Reshuffle(beam.PTransform): + def expand(self, input_or_inputs): + return beam.pvalue.PCollection.from_(input_or_inputs) + + +@dataclasses.dataclass +class _Read(beam.PTransform): + source: SourceBase + + def expand(self, input_or_inputs): + return beam.pvalue.PCollection.from_(input_or_inputs) + + +@typehints.with_input_types(t.Tuple[K, V]) +@typehints.with_output_types(t.Tuple[K, t.Iterable[V]]) +class _GroupByKeyOnly(beam.PTransform): + def expand(self, input_or_inputs): + return beam.pvalue.PCollection.from_(input_or_inputs) + + def infer_output_type(self, input_type): + + key_type, value_type = typehints.trivial_inference.key_value_types( + input_type + ) + return typehints.KV[key_type, typehints.Iterable[value_type]] + + +@typehints.with_input_types(t.Tuple[K, t.Iterable[V]]) +@typehints.with_output_types(t.Tuple[K, t.Iterable[V]]) +class _GroupAlsoByWindow(beam.ParDo): + """Not used yet...""" + def __init__(self, windowing): + super().__init__(_GroupAlsoByWindowDoFn(windowing)) + self.windowing = windowing + + def expand(self, input_or_inputs): + return beam.pvalue.PCollection.from_(input_or_inputs) + + +@typehints.with_input_types(t.Tuple[K, V]) +@typehints.with_output_types(t.Tuple[K, t.Iterable[V]]) +class _GroupByKey(beam.PTransform): + def expand(self, input_or_inputs): + return input_or_inputs | "GroupByKey" >> _GroupByKeyOnly() + + +class _Flatten(beam.PTransform): + def expand(self, input_or_inputs): + is_bounded = all(pcoll.is_bounded for pcoll in input_or_inputs) + return beam.pvalue.PCollection(self.pipeline, is_bounded=is_bounded) + + +def dask_overrides() -> t.List[PTransformOverride]: + class CreateOverride(PTransformOverride): + def matches(self, applied_ptransform: AppliedPTransform) -> bool: + return applied_ptransform.transform.__class__ == beam.Create + + def get_replacement_transform_for_applied_ptransform( + self, applied_ptransform: AppliedPTransform) -> ptransform.PTransform: + return _Create(t.cast(beam.Create, applied_ptransform.transform).values) + + class ReshuffleOverride(PTransformOverride): + def matches(self, applied_ptransform: AppliedPTransform) -> bool: + return applied_ptransform.transform.__class__ == beam.Reshuffle + + def get_replacement_transform_for_applied_ptransform( + self, applied_ptransform: AppliedPTransform) -> ptransform.PTransform: + return _Reshuffle() + + class ReadOverride(PTransformOverride): + def matches(self, applied_ptransform: AppliedPTransform) -> bool: + return applied_ptransform.transform.__class__ == beam.io.Read + + def get_replacement_transform_for_applied_ptransform( + self, applied_ptransform: AppliedPTransform) -> ptransform.PTransform: + return _Read(t.cast(beam.io.Read, 
applied_ptransform.transform).source)
+
+  class GroupByKeyOverride(PTransformOverride):
+    def matches(self, applied_ptransform: AppliedPTransform) -> bool:
+      return applied_ptransform.transform.__class__ == beam.GroupByKey
+
+    def get_replacement_transform_for_applied_ptransform(
+        self, applied_ptransform: AppliedPTransform) -> ptransform.PTransform:
+      return _GroupByKey()
+
+  class FlattenOverride(PTransformOverride):
+    def matches(self, applied_ptransform: AppliedPTransform) -> bool:
+      return applied_ptransform.transform.__class__ == beam.Flatten
+
+    def get_replacement_transform_for_applied_ptransform(
+        self, applied_ptransform: AppliedPTransform) -> ptransform.PTransform:
+      return _Flatten()
+
+  return [
+      CreateOverride(),
+      ReshuffleOverride(),
+      ReadOverride(),
+      GroupByKeyOverride(),
+      FlattenOverride(),
+  ]
diff --git a/sdks/python/apache_beam/runners/dask/transform_evaluator.py b/sdks/python/apache_beam/runners/dask/transform_evaluator.py
new file mode 100644
index 000000000000..c4aac7f2111f
--- /dev/null
+++ b/sdks/python/apache_beam/runners/dask/transform_evaluator.py
@@ -0,0 +1,103 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Transform Beam PTransforms into Dask Bag operations.
+
+A minimum set of operation substitutions to adapt Beam's PTransform model
+to Dask Bag functions.
+
+TODO(alxr): Translate ops from https://docs.dask.org/en/latest/bag-api.html.
+"""
+import abc
+import dataclasses
+import typing as t
+
+import apache_beam
+import dask.bag as db
+from apache_beam.pipeline import AppliedPTransform
+from apache_beam.runners.dask.overrides import _Create
+from apache_beam.runners.dask.overrides import _Flatten
+from apache_beam.runners.dask.overrides import _GroupByKeyOnly
+
+OpInput = t.Union[db.Bag, t.Sequence[db.Bag], None]
+
+
+@dataclasses.dataclass
+class DaskBagOp(abc.ABC):
+  applied: AppliedPTransform
+
+  @property
+  def transform(self):
+    return self.applied.transform
+
+  @abc.abstractmethod
+  def apply(self, input_bag: OpInput) -> db.Bag:
+    pass
+
+
+class NoOp(DaskBagOp):
+  def apply(self, input_bag: OpInput) -> db.Bag:
+    return input_bag
+
+
+class Create(DaskBagOp):
+  def apply(self, input_bag: OpInput) -> db.Bag:
+    assert input_bag is None, 'Create expects no input!'
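+    # beam.Create was swapped for _Create by the pipeline overrides in
+    # overrides.py; _Create simply carries the original values, which are
+    # materialized below as an in-memory Dask bag.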
+ original_transform = t.cast(_Create, self.transform) + items = original_transform.values + return db.from_sequence(items) + + +class ParDo(DaskBagOp): + def apply(self, input_bag: db.Bag) -> db.Bag: + transform = t.cast(apache_beam.ParDo, self.transform) + return input_bag.map( + transform.fn.process, *transform.args, **transform.kwargs).flatten() + + +class Map(DaskBagOp): + def apply(self, input_bag: db.Bag) -> db.Bag: + transform = t.cast(apache_beam.Map, self.transform) + return input_bag.map( + transform.fn.process, *transform.args, **transform.kwargs) + + +class GroupByKey(DaskBagOp): + def apply(self, input_bag: db.Bag) -> db.Bag: + def key(item): + return item[0] + + def value(item): + k, v = item + return k, [elm[1] for elm in v] + + return input_bag.groupby(key).map(value) + + +class Flatten(DaskBagOp): + def apply(self, input_bag: OpInput) -> db.Bag: + assert type(input_bag) is list, 'Must take a sequence of bags!' + return db.concat(input_bag) + + +TRANSLATIONS = { + _Create: Create, + apache_beam.ParDo: ParDo, + apache_beam.Map: Map, + _GroupByKeyOnly: GroupByKey, + _Flatten: Flatten, +} diff --git a/sdks/python/mypy.ini b/sdks/python/mypy.ini index 9309120a8cab..a628036d6682 100644 --- a/sdks/python/mypy.ini +++ b/sdks/python/mypy.ini @@ -89,6 +89,9 @@ ignore_errors = true [mypy-apache_beam.runners.direct.*] ignore_errors = true +[mypy-apache_beam.runners.dask.*] +ignore_errors = true + [mypy-apache_beam.runners.interactive.*] ignore_errors = true diff --git a/sdks/python/setup.py b/sdks/python/setup.py index 8451fc596466..61858fa5d978 100644 --- a/sdks/python/setup.py +++ b/sdks/python/setup.py @@ -350,7 +350,11 @@ def get_portability_package_data(): # This can be removed once dill is updated to version > 0.3.5.1 # Issue: https://github.com/apache/beam/issues/23566 'dataframe': ['pandas>=1.0,<1.5;python_version<"3.10"', - 'pandas>=1.4.3,<1.5;python_version>="3.10"'] + 'pandas>=1.4.3,<1.5;python_version>="3.10"'], + 'dask': [ + 'dask >= 2022.6', + 'distributed >= 2022.6', + ], }, zip_safe=False, # PyPI package information. diff --git a/sdks/python/test-suites/tox/common.gradle b/sdks/python/test-suites/tox/common.gradle index 99afc1d72557..61802ac9c45e 100644 --- a/sdks/python/test-suites/tox/common.gradle +++ b/sdks/python/test-suites/tox/common.gradle @@ -24,6 +24,9 @@ test.dependsOn "testPython${pythonVersionSuffix}" toxTask "testPy${pythonVersionSuffix}Cloud", "py${pythonVersionSuffix}-cloud" test.dependsOn "testPy${pythonVersionSuffix}Cloud" +toxTask "testPy${pythonVersionSuffix}Dask", "py${pythonVersionSuffix}-dask" +test.dependsOn "testPy${pythonVersionSuffix}Dask" + toxTask "testPy${pythonVersionSuffix}Cython", "py${pythonVersionSuffix}-cython" test.dependsOn "testPy${pythonVersionSuffix}Cython" diff --git a/sdks/python/tox.ini b/sdks/python/tox.ini index 138a5410ead0..11997b55c771 100644 --- a/sdks/python/tox.ini +++ b/sdks/python/tox.ini @@ -17,7 +17,7 @@ [tox] # new environments will be excluded by default unless explicitly added to envlist. 
-envlist = py37,py38,py39,py310,py37-{cloud,cython,lint,mypy},py38-{cloud,cython,docs,cloudcoverage},py39-{cloud,cython},py310-{cloud,cython},whitespacelint +envlist = py37,py38,py39,py310,py37-{cloud,cython,lint,mypy,dask},py38-{cloud,cython,docs,cloudcoverage,dask},py39-{cloud,cython},py310-{cloud,cython,dask},whitespacelint toxworkdir = {toxinidir}/target/{env:ENV_NAME:.tox} [pycodestyle] @@ -92,12 +92,16 @@ extras = test,gcp,interactive,dataframe,aws,azure commands = {toxinidir}/scripts/run_pytest.sh {envname} "{posargs}" +[testenv:py{37,38,39}-dask] +extras = test,dask +commands = + {toxinidir}/scripts/run_pytest.sh {envname} "{posargs}" [testenv:py38-cloudcoverage] deps = codecov pytest-cov==3.0.0 passenv = GIT_* BUILD_* ghprb* CHANGE_ID BRANCH_NAME JENKINS_* CODECOV_* -extras = test,gcp,interactive,dataframe,aws +extras = test,gcp,interactive,dataframe,aws,dask commands = -rm .coverage {toxinidir}/scripts/run_pytest.sh {envname} "{posargs}" "--cov-report=xml --cov=. --cov-append" @@ -129,6 +133,8 @@ commands = deps = -r build-requirements.txt mypy==0.782 + dask==2022.01.0 + distributed==2022.01.0 # make extras available in case any of these libs are typed extras = gcp @@ -136,8 +142,9 @@ commands = mypy --version python setup.py mypy + [testenv:py38-docs] -extras = test,gcp,docs,interactive,dataframe +extras = test,gcp,docs,interactive,dataframe,dask deps = Sphinx==1.8.5 sphinx_rtd_theme==0.4.3 From a5821ed8a3cc12c1b3d35af600eb61e04e3feb02 Mon Sep 17 00:00:00 2001 From: bullet03 Date: Tue, 25 Oct 2022 13:48:37 -0700 Subject: [PATCH 044/115] [Website] update PULL_REQUEST_TEMPLATE.md (#23576) * [Website] add new case-study template, update PR template * Updates to ADD_LOGO.md * Updates to ADD_CASE_STUDY.md, CASE_STUDY_TEMPLATE.md, and ADD_LOGO.md * Updates to describe adding new Apache Beam case studies and logos * update adding new case study * Formatting updates * feat: formating, delete PULL_REQUEST_TEMPLATE.md Co-authored-by: Alex Kosolapov Co-authored-by: Alex Kosolapov --- website/ADD_CASE_STUDY.md | 72 +++++++++++++++++++++++++ website/ADD_LOGO.md | 24 +++++---- website/CASE_STUDY_TEMPLATE.md | 97 ++++++++++++++++++++++++++++++++++ website/CONTRIBUTE.md | 30 +++++++++-- 4 files changed, 208 insertions(+), 15 deletions(-) create mode 100644 website/ADD_CASE_STUDY.md create mode 100644 website/CASE_STUDY_TEMPLATE.md diff --git a/website/ADD_CASE_STUDY.md b/website/ADD_CASE_STUDY.md new file mode 100644 index 000000000000..f5fd454cad8c --- /dev/null +++ b/website/ADD_CASE_STUDY.md @@ -0,0 +1,72 @@ + + +# How to add a new case study + +1. Fork [Apache Beam](https://github.com/apache/beam) repository +2. This [case study draft template](https://docs.google.com/document/d/1qRpXW-WM4jtlcy5VaqDaXgYap9KI1ii27Uwp641UOBM/edit#heading=h.l6lphj20eacs) provides some helpful tips, questions and ideas to prepare and organize your case study content +3. Copy [case study md template](https://github.com/apache/beam/tree/master/website/CASE_STUDY_TEMPLATE.md) to the `case-studies` folder and name your file with company or project name e.g., `beam/website/www/site/content/en/case-studies/YOUR_CASE_STUDY_NAME.md` +4. Add your case study content to the md file you just created. See [Case study md file recommendations](#case-study-md-file-recommendations) +5. 
Add images to the image folder [beam/website/www/site/static/images/case-study](https://github.com/apache/beam/tree/master/website/www/site/static/images/case-study)/company-name according to [Case study images recommendations](#case-study-images-recommendations)
+6. Add a case study quote card for the [Apache Beam](https://beam.apache.org/) website homepage `Case Studies Powered by Apache Beam` section. See [Add case study card to the Apache Beam website homepage](#Add-case-study-card-to-the-Apache-Beam-website-homepage)
+7. Create a pull request to the Apache Beam repository with your changes
+
+
+## Case study md file recommendations
+
+The following properties determine how your case study will look on the [Apache Beam case studies](https://beam.apache.org/case-studies/) listing and on the case study page itself.
+
+| Field | Description |
+|-------------------|---------------------------------------------------------------------------------------------------------|
+| `title` | Case study title, usually 4-12 words |
+| `name` | Company or project name |
+| `icon` | Relative path to the company/project logo, e.g. "/images/logos/powered-by/company_name.png" |
+| `category` | `study` for case studies |
+| `cardTitle` | Case study card title for the Apache Beam [case studies](https://beam.apache.org/case-studies/) page |
+| `cardDescription` | Description for the [case studies](https://beam.apache.org/case-studies/) page, usually 30-40 words |
+| `authorName` | Case study author |
+| `authorPosition` | Case study author role |
+| `authorImg` | Relative path for the case study author photo, e.g. "/images/case-study/company/authorImg.png" |
+| `publishDate` | Case study publish date, used for sorting on the [case studies](https://beam.apache.org/case-studies/) page, e.g. `2022-10-14T01:56:00+00:00` |
+
+Other sections of the [case study md template](https://github.com/apache/beam/blob/master/website/CASE_STUDY_TEMPLATE.md) are organized to present the case study content.
+
+## Case study images recommendations
+
+1. Add the case study company/project logo to the [images/logos/powered-by](https://github.com/apache/beam/tree/master/website/www/site/static/images/logos/powered-by) folder. Please use your company/project name, e.g. `ricardo.png`
+2. Create your company/project folder to group the images used in your case study, e.g., the `beam/website/www/site/static/images/case-study/company-name` folder
+3. Add the author photo to the `beam/website/www/site/static/images/case-study/company-name` folder
+4. Add any other images that your case study uses to the `beam/website/www/site/static/images/case-study/company-name` folder
+
+
+## Add case study card to the Apache Beam website homepage
+
+To add a new case study card to the Apache Beam website homepage, add the new case study entry to [quotes.yaml](https://github.com/apache/beam/blob/master/website/www/site/data/en/quotes.yaml) using the following format:
+
+| Field | Description |
+|-------------------|---------------------------------------------------------------------------------------------------------|
+| `text` | Homepage case study text, recommended up to 215 characters or so |
+| `icon` | Relative path to the quotation marks logo, by default `icons/quote-icon.svg` |
+| `logoUrl` | Relative path for the company/project logo, e.g.
`images/logos/powered-by/company_name.png` | +| `linkUrl` | Relative path to the case study web page, e.g., `case-studies/YOUR_CASE_STUDY_NAME/index.html` | +| `linkText` | Link text, by default using `Learn more` | + +Example: +``` + text: Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s. // recommendation to use no more than 215 symbols in the text + icon: icons/quote-icon.svg + logoUrl: images/logos/powered-by/company_name.png + linkUrl: case-studies/YOUR_CASE_STUDY_NAME/index.html + linkText: Learn more +``` diff --git a/website/ADD_LOGO.md b/website/ADD_LOGO.md index a478ab867641..b6beddf05bca 100644 --- a/website/ADD_LOGO.md +++ b/website/ADD_LOGO.md @@ -18,27 +18,31 @@ --> # How to add your logo - +Please follow these steps to add your company or project logo to Apache Beam [case studies](https://beam.apache.org/case-studies/) page: 1. Fork [Apache Beam](https://github.com/apache/beam) repository 2. Add file with company or project name to the [case-studies](https://github.com/apache/beam/tree/master/website/www/site/content/en/case-studies) folder e.g., `company.md` -3. Add project/company logo to +3. Add company/project logo to the [images/logos/powered-by](https://github.com/apache/beam/tree/master/website/www/site/static/images/logos/powered-by) folder. Please use your company/project name e.g. `ricardo.png` 4. Copy template below to the created file and replace next fields with your data -| Field | Name | -|-----------------|--------------------------------------------------| -| title | Project/Company name | -| icon | Path to the logo e.g. "/images/company_name.png" | -| cardDescription | Description of the project | +| Field | Description | +|-------------------|---------------------------------------------------------------------------------------------------------| +| `title` | Company/project name | +| `icon` | Path to the company/project logo e.g. "/images/logos/powered-by/company_name.png" | +| `hasNav` | Specified logo page has space for left & right nav menu | +| `hasLink` | Links logo image to the company/project website instead of displaying cardDescription, optional | +| `cardDescription` | Company or project description, optional | ``` --- title: "Cloud Dataflow" -icon: /images/company_name.png -cardDescription: "Project/Company description" +icon: /images/logos/powered-by/company_name.png +hasNav: true +hasLink: false +cardDescription: "Google Cloud Dataflow is a fully managed service for executing Apache Beam pipelines within the Google Cloud Platform ecosystem." --- ``` -5. Create pull request to the apache beam repository with your changes +5. Create pull request to the Apache Beam repository with your changes \ No newline at end of file diff --git a/website/CASE_STUDY_TEMPLATE.md b/website/CASE_STUDY_TEMPLATE.md new file mode 100644 index 000000000000..4c1faa1265df --- /dev/null +++ b/website/CASE_STUDY_TEMPLATE.md @@ -0,0 +1,97 @@ +--- +title: "Case study title" +name: "Company/project name" +icon: /images/logos/powered-by/company_name.png +hasNav: true +category: study +cardTitle: "Case study title (different for the Case Studies page listing)" +cardDescription: "Case study description for Case Studies page listing" +authorName: "Name LastName" +authorPosition: "Software Engineer @ companyName" +authorImg: /images/case-study/company/authorImg.png +publishDate: 2022-02-15T01:56:00+00:00 +--- + + +
    +
    + +
    +
    +
    + “Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book.” +
    +
    +
    + +
    +
    +
    + Name LastName +
    +
    + Software Engineer @ companyName +
    +
    +
    +
    +
    + + +
    + +# Case Study Title + +## Background + +[Lorem Ipsum](https://www.lipsum.com/) is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum. + +## Quote Section + +
    +
    + Lorem Ipsum is simply dummy text of the printing and typesetting industry +
    +
    +
    + +
    +
    +
    + Name LastName +
    +
    + Software Engineer @ companyName +
    +
    +
    +
    + +## Content Section + +Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. + +
    + +
    + +## Results + +Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. + + +{{< case_study_feedback Template >}} +
    +
    diff --git a/website/CONTRIBUTE.md b/website/CONTRIBUTE.md index 31a4550be237..b54139280df9 100644 --- a/website/CONTRIBUTE.md +++ b/website/CONTRIBUTE.md @@ -30,12 +30,15 @@ This guide consists of: - [Define TableOfContents](#define-tableofcontents) - [Language switching](#language-switching) - [Code highlighting](#code-highlighting) - - [Adding class to markdown text](#paragraph) + - [Adding class to markdown text](#adding-class-to-markdown-text) - [Table](#table) - [Github sample](#github-sample) - [Others](#others) + - [How to add relative links to JavaScript or CSS](#how-to-add-relative-links-to-javascript-or-css) - [What to be replaced in Jekyll](#what-to-be-replaced-in-jekyll) - [Translation guide](#translation-guide) +- [How to add new case study](#how-to-add-a-new-apache-beam-case-study) +- [How to add new logo](#how-to-add-a-new-logo-to-case-studies-page) ## Project structure @@ -45,10 +48,13 @@ www/ ├── site │   ├── archetypes # frontmatter template │   ├── assets -│ │ └── scss # styles +│ │ ├── icons # svg icons +│ │ ├── js # scripts +│ │ ├── scss # styles │   ├── content # pages │ │ └── en │ │ ├── blog +│ │ ├── case-studies │ │ ├── community │ │ ├── contribute │ │ ├── documentation @@ -63,7 +69,6 @@ www/ │ │ ├── downloads # downloaded files │ │ └── fonts │ │ └── images -│ │ └── js │   └── themes │ └── docsy ├── build_code_samples.sh @@ -162,7 +167,7 @@ $ hugo new about/_index.md $ hugo new -c content/pl about/_index.md ``` -## How to write in Hugo +## How to write in Hugo way This section will guide you how to use Hugo shortcodes in Apache Beam website. Please refer to the [Hugo documentation](https://gohugo.io/content-management/shortcodes/) for more details of usage. @@ -280,7 +285,7 @@ A table markdown here. {{< /table >}} ``` -### Code sample +### Github sample To retrieve a piece of code from Beam project. @@ -312,6 +317,15 @@ To get branch of the repository in markdown: To render capability matrix, please take a look at [this example](/www/site/content/en/documentation/runners/capability-matrix/#beam-capability-matrix). +### How to add relative links to JavaScript or CSS +Please take a note that relative links should be added with relative paths to JavaScript or CSS files with using Hugo syntax, so that they are able to form correct absolute links on localhost, staging and production. Examples: +``` +/themes/docsy/assets/js/search.js # var searchPage = "{{ "search/" | absURL }}?q=" + query; +/assets/js/page-nav.js # img.src = "{{ "images/arrow-expandable.svg" | absURL }}"; +/assets/js/copy-to-clipboard.js # +/assets/scss/_case_study.scss # background-image: url('{{ "images/open-quote.svg" | absURL }}'); +``` + ## What to be replaced in Jekyll This section will briefly let you know the replaced features of Jekyll in terms of writing a new blog post or documentation in Hugo. @@ -420,3 +434,9 @@ Now from your template: Similar to markdown content translation, there are two separated section menus `/www/site/layouts/partials/section-menu` corresponding to your languages. Your job is to take the section menus in `en` directory, translate and place them inside your `pl` directory. **Note**: if you get stuck at adding translation, please refer to [our example](https://github.com/PolideaInternal/beam/tree/example/i18n/). 
+
+## How to add a new Apache Beam case study
+Please follow this guide to [add a new case study](https://github.com/apache/beam/tree/master/website/ADD_CASE_STUDY.md)
+
+## How to add a new logo to case studies page
+Please follow this guide to add [a new logo](https://github.com/apache/beam/tree/master/website/ADD_LOGO.md) to the [case studies](https://beam.apache.org/case-studies/) page.

From e35ddede9b25b70cc39d877ac06973c4747c66bc Mon Sep 17 00:00:00 2001
From: Heejong Lee
Date: Tue, 25 Oct 2022 13:55:36 -0700
Subject: [PATCH 045/115] [BEAM-23836] Updating documentation for cross-language Java pipelines using Python external transforms

---
 .../en/documentation/programming-guide.md | 35 +++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/website/www/site/content/en/documentation/programming-guide.md b/website/www/site/content/en/documentation/programming-guide.md
index 4276e7dc3658..8436acb81453 100644
--- a/website/www/site/content/en/documentation/programming-guide.md
+++ b/website/www/site/content/en/documentation/programming-guide.md
@@ -7282,7 +7282,16 @@ To create an SDK wrapper for use in a Python pipeline, do the following:

 #### 13.1.2. Creating cross-language Python transforms

-To make your Python transform usable with different SDK languages, you must create a Python module that registers an existing Python transform as a cross-language transform for use with the Python expansion service and calls into that existing transform to perform its intended operation.
+Any Python transforms defined in the scope of the expansion service should be accessible by specifying their fully qualified names. For example, you could use Python's `ReadFromText` transform in a Java pipeline with its fully qualified name `apache_beam.io.ReadFromText`:
+
+```java
+p.apply("Read",
+    PythonExternalTransform.<PBegin, PCollection<String>>from("apache_beam.io.ReadFromText")
+    .withKwarg("file_pattern", options.getInputFile())
+    .withKwarg("validate", false))
+```
+
+Alternatively, you may want to create a Python module that registers an existing Python transform as a cross-language transform for use with the Python expansion service and calls into that existing transform to perform its intended operation. A registered URN can be used later in an expansion request for indicating an expansion target.

 **Defining the Python module**

@@ -7393,7 +7402,29 @@ Depending on the SDK language of the pipeline, you can use a high-level SDK-wrap

 #### 13.2.1. Using cross-language transforms in a Java pipeline

-Currently, to access cross-language transforms from the Java SDK, you have to use the lower-level [External](https://github.com/apache/beam/blob/master/runners/core-construction-java/src/main/java/org/apache/beam/runners/core/construction/External.java) class.
+Users have three options for using cross-language transforms in a Java pipeline. At the highest level of abstraction, some popular Python transforms are accessible through dedicated Java wrapper transforms: for example, the Java SDK's `DataframeTransform` class wraps the Python SDK's `DataframeTransform`, its `RunInference` class wraps the Python SDK's `RunInference`, and so on.
When an SDK-specific wrapper transform is not available for a target Python transform, you could use the lower-level [PythonExternalTransform](https://github.com/apache/beam/blob/master/sdks/java/extensions/python/src/main/java/org/apache/beam/sdk/extensions/python/PythonExternalTransform.java) class instead by specifying the fully qualified name of the Python transform. If you want to try external transforms from SDKs other than Python, you may also use the lowest-level [External](https://github.com/apache/beam/blob/master/runners/core-construction-java/src/main/java/org/apache/beam/runners/core/construction/External.java) class.
+
+**Using an SDK wrapper**
+
+To use a cross-language transform through an SDK wrapper, import the module for the SDK wrapper and call it from your pipeline, as shown in the example:
+
+```java
+import org.apache.beam.sdk.extensions.python.transforms.DataframeTransform;
+
+input.apply(DataframeTransform.of("lambda df: df.groupby('a').sum()").withIndexes())
+```
+
+**Using the PythonExternalTransform class**
+
+When an SDK-specific wrapper is not available, you could access the Python cross-language transform through the `PythonExternalTransform` class by specifying the fully qualified name and the constructor arguments of the target Python transform.
+
+```java
+input.apply(
+    PythonExternalTransform.<PCollection<Row>, PCollection<Row>>from(
+        "apache_beam.dataframe.transforms.DataframeTransform")
+    .withKwarg("func", PythonCallableSource.of("lambda df: df.groupby('a').sum()"))
+    .withKwarg("include_indexes", true))
+```

 **Using the External class**

From 735cb7b32811222086a06e624d3a950d9be7cd29 Mon Sep 17 00:00:00 2001
From: bullet03
Date: Tue, 25 Oct 2022 14:01:48 -0700
Subject: [PATCH 046/115] [Website] change width of the additional case studies cards (#23824)

---
 website/www/site/assets/scss/_case_study.scss | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/www/site/assets/scss/_case_study.scss b/website/www/site/assets/scss/_case_study.scss
index 00ed9fd6c933..71a1f1585b09 100644
--- a/website/www/site/assets/scss/_case_study.scss
+++ b/website/www/site/assets/scss/_case_study.scss
@@ -88,7 +88,7 @@

 .case-study-used-by-card--responsive {
   @media (min-width: $mobile){
-    width: 23%;
+    width: 18%;
     margin-right: 0;
   }
 }

From 44787bcd9cd1b978fa40b0c65ea229f27105c0e4 Mon Sep 17 00:00:00 2001
From: Chamikara Jayalath
Date: Tue, 25 Oct 2022 15:50:38 -0700
Subject: [PATCH 047/115] Adds a dependency to Python Multi-language library to the GCP Bom examples archetype

---
 .../src/main/resources/archetype-resources/pom.xml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml b/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml
index c3fb0f26fcb2..a87a506a3a8b 100644
--- a/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml
+++ b/sdks/java/maven-archetypes/gcp-bom-examples/src/main/resources/archetype-resources/pom.xml
@@ -324,6 +324,13 @@
       <artifactId>beam-sdks-java-io-google-cloud-platform</artifactId>
+
+    <dependency>
+      <groupId>org.apache.beam</groupId>
+      <artifactId>beam-sdks-java-extensions-python</artifactId>
+      <version>${beam.version}</version>
+    </dependency>
+
       <groupId>com.google.api-client</groupId>

From ead2d2e8025874ad5c3f09ea0fda06caf027e18c Mon Sep 17 00:00:00 2001
From: Xinyu Liu
Date: Tue, 25 Oct 2022 17:10:45 -0700
Subject: [PATCH 048/115] Support keyed executors in Samza Runner to process bundles for stateful ParDo (#23434)

---
 .../samza/runtime/AsyncDoFnRunner.java | 93 ++++++++++++++++----
 .../beam/runners/samza/runtime/OpAdapter.java |   4 +-
 .../samza/runtime/SamzaDoFnRunners.java       |   3 +-
 .../samza/runtime/AsyncDoFnRunnerTest.java    |  76 +++++++++++++--
 4 files changed, 154 insertions(+), 22 deletions(-)

diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunner.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunner.java
index 7120696aa4f1..76dfe5b720d8 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunner.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunner.java
@@ -18,7 +18,10 @@
 package org.apache.beam.runners.samza.runtime;
 
 import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.stream.Collectors;
 import org.apache.beam.runners.core.DoFnRunner;
@@ -27,6 +30,8 @@
 import org.apache.beam.sdk.transforms.DoFn;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.apache.beam.sdk.util.WindowedValue;
+import org.apache.beam.sdk.values.KV;
+import org.checkerframework.checker.nullness.qual.Nullable;
 import org.joda.time.Instant;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,30 +44,46 @@ public class AsyncDoFnRunner<InT, OutT> implements DoFnRunner<InT, OutT> {
 
   private static final Logger LOG = LoggerFactory.getLogger(AsyncDoFnRunner.class);
 
+  // A dummy key to represent null keys
+  private static final Object NULL_KEY = new Object();
+
   private final DoFnRunner<InT, OutT> underlying;
   private final ExecutorService executor;
   private final OpEmitter<OutT> emitter;
   private final FutureCollector<OutT> futureCollector;
+  private final boolean isStateful;
+
+  /**
+   * This map keeps track of the last outputFutures for a certain key. When the next element of the
+   * key comes in, its outputFutures will be chained from the last outputFutures in the map. When
+   * all futures of a key have completed, the key entry will be removed. The map is bounded by
+   * (bundle size * 2).
+   */
+  private final Map<Object, CompletableFuture<Collection<WindowedValue<OutT>>>> keyedOutputFutures;
 
   public static <InT, OutT> AsyncDoFnRunner<InT, OutT> create(
       DoFnRunner<InT, OutT> runner,
       OpEmitter<OutT> emitter,
       FutureCollector<OutT> futureCollector,
+      boolean isStateful,
       SamzaPipelineOptions options) {
     LOG.info("Run DoFn with " + AsyncDoFnRunner.class.getName());
-    return new AsyncDoFnRunner<>(runner, emitter, futureCollector, options);
+    return new AsyncDoFnRunner<>(runner, emitter, futureCollector, isStateful, options);
   }
 
   private AsyncDoFnRunner(
      DoFnRunner<InT, OutT> runner,
      OpEmitter<OutT> emitter,
      FutureCollector<OutT> futureCollector,
+      boolean isStateful,
       SamzaPipelineOptions options) {
     this.underlying = runner;
     this.executor = options.getExecutorServiceForProcessElement();
     this.emitter = emitter;
     this.futureCollector = futureCollector;
+    this.isStateful = isStateful;
+    this.keyedOutputFutures = new ConcurrentHashMap<>();
   }
 
   @Override
@@ -72,23 +93,59 @@ public void startBundle() {
 
   @Override
   public void processElement(WindowedValue<InT> elem) {
-    final CompletableFuture<Void> future =
-        CompletableFuture.runAsync(
-            () -> {
-              underlying.processElement(elem);
-            },
-            executor);
-    final CompletableFuture<Collection<WindowedValue<OutT>>> outputFutures =
-        future.thenApply(
-            x ->
-                emitter.collectOutput().stream()
-                    .map(OpMessage::getElement)
-                    .collect(Collectors.toList()));
+    final CompletableFuture<Collection<WindowedValue<OutT>>> outputFutures =
+        isStateful ? processStateful(elem) : processElement(elem, null);
 
     futureCollector.addAll(outputFutures);
   }
 
+  private CompletableFuture<Collection<WindowedValue<OutT>>> processElement(
+      WindowedValue<InT> elem,
+      @Nullable CompletableFuture<Collection<WindowedValue<OutT>>> prevOutputFuture) {
+
+    final CompletableFuture<Collection<WindowedValue<OutT>>> prevFuture =
+        prevOutputFuture == null
+            ? CompletableFuture.completedFuture(Collections.emptyList())
+            : prevOutputFuture;
+
+    // For ordering by key, we chain the processing of the elem to the completion of
+    // the previous output of the same key
+    return prevFuture.thenApplyAsync(
+        x -> {
+          underlying.processElement(elem);
+
+          return emitter.collectOutput().stream()
+              .map(OpMessage::getElement)
+              .collect(Collectors.toList());
+        },
+        executor);
+  }
+
+  private CompletableFuture<Collection<WindowedValue<OutT>>> processStateful(
+      WindowedValue<InT> elem) {
+    final Object key = getKey(elem);
+
+    final CompletableFuture<Collection<WindowedValue<OutT>>> outputFutures =
+        processElement(elem, keyedOutputFutures.get(key));
+
+    // Update the latest outputFuture for key
+    keyedOutputFutures.put(key, outputFutures);
+
+    // Remove the outputFuture from the map once it's complete.
+    // This ensures the map will be cleaned up immediately.
+    return outputFutures.thenApply(
+        output -> {
+          // Under the condition that the outputFutures has not been updated
+          keyedOutputFutures.remove(key, outputFutures);
+          return output;
+        });
+  }
+
+  /** Package private for testing. */
+  boolean hasOutputFuturesForKey(Object key) {
+    return keyedOutputFutures.containsKey(key);
+  }
+
   @Override
   public void onTimer(
       String timerId,
@@ -115,4 +172,14 @@ public void onWindowExpiration(BoundedWindow window, Instant timestamp, K
   public DoFn<InT, OutT> getFn() {
     return underlying.getFn();
   }
+
+  private Object getKey(WindowedValue<InT> elem) {
+    KV<?, ?> kv = (KV<?, ?>) elem.getValue();
+    if (kv == null) {
+      return NULL_KEY;
+    } else {
+      Object key = kv.getKey();
+      return key == null ? NULL_KEY : key;
+    }
+  }
 }
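
The heart of this change is the per-key chaining of output futures in `AsyncDoFnRunner` above. Distilled into a minimal, Beam-independent sketch (the class and method names below are invented for illustration, not part of any Beam or Samza API), the same pattern looks like this:

```java
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

/** Illustrative sketch: run tasks concurrently across keys, serially within a key. */
final class KeyedSerialExecutor {
  private final Map<Object, CompletableFuture<Void>> lastFutureByKey = new ConcurrentHashMap<>();
  private final ExecutorService executor;

  KeyedSerialExecutor(ExecutorService executor) {
    this.executor = executor;
  }

  CompletableFuture<Void> submit(Object key, Runnable task) {
    CompletableFuture<Void> prev =
        lastFutureByKey.getOrDefault(key, CompletableFuture.completedFuture(null));
    // Chain onto the previous future of the same key, so two elements of one
    // key never run concurrently; distinct keys still run in parallel.
    CompletableFuture<Void> next = prev.thenRunAsync(task, executor);
    lastFutureByKey.put(key, next);
    // Drop the entry once this future completes, but only if it is still the
    // latest one recorded for the key.
    next.thenRun(() -> lastFutureByKey.remove(key, next));
    return next;
  }
}
```

The two-argument `remove(key, next)` mirrors `keyedOutputFutures.remove(key, outputFutures)` above: an entry is dropped only if it is still the latest future recorded for that key, which keeps the map bounded. As in `AsyncDoFnRunner`, the sketch assumes `submit` is called from a single thread, so the read-then-put on the map cannot race with another submission for the same key.
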
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/OpAdapter.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/OpAdapter.java
index eabcd87f5f36..c5353b0e2352 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/OpAdapter.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/OpAdapter.java
@@ -159,12 +159,12 @@ public void close() {
     op.close();
   }
 
-  private static class OpEmitterImpl<OutT> implements OpEmitter<OutT> {
+  static class OpEmitterImpl<OutT> implements OpEmitter<OutT> {
     private final Queue<OpMessage<OutT>> outputQueue;
     private CompletionStage<Collection<WindowedValue<OutT>>> outputFuture;
     private Instant outputWatermark;
 
-    private OpEmitterImpl() {
+    OpEmitterImpl() {
      outputQueue = new ConcurrentLinkedQueue<>();
     }
 
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java
index 12872b82d8f7..ec1a9f365090 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/runtime/SamzaDoFnRunners.java
@@ -153,7 +153,8 @@ public static DoFnRunner create(
     }
 
     return pipelineOptions.getNumThreadsForProcessElement() > 1
-        ? AsyncDoFnRunner.create(doFnRunnerWithStates, emitter, futureCollector, pipelineOptions)
+        ? AsyncDoFnRunner.create(
+            doFnRunnerWithStates, emitter, futureCollector, keyedInternals != null, pipelineOptions)
         : doFnRunnerWithStates;
   }
 
diff --git a/runners/samza/src/test/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunnerTest.java b/runners/samza/src/test/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunnerTest.java
index d62a28b374f1..6d4ffc70d5a4 100644
--- a/runners/samza/src/test/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunnerTest.java
+++ b/runners/samza/src/test/java/org/apache/beam/runners/samza/runtime/AsyncDoFnRunnerTest.java
@@ -17,12 +17,22 @@
  */
 package org.apache.beam.runners.samza.runtime;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
+import org.apache.beam.runners.core.DoFnRunner;
+import org.apache.beam.runners.samza.SamzaPipelineOptions;
 import org.apache.beam.sdk.coders.VarIntCoder;
 import org.apache.beam.sdk.options.PipelineOptionsFactory;
 import org.apache.beam.sdk.state.CombiningState;
@@ -36,6 +46,7 @@ import org.apache.beam.sdk.transforms.Sum;
 import org.apache.beam.sdk.transforms.MapElements;
 import org.apache.beam.sdk.transforms.ParDo;
 import org.apache.beam.sdk.transforms.Sum;
+import org.apache.beam.sdk.util.WindowedValue;
 import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.beam.sdk.values.TypeDescriptors;
@@ -116,11 +127,7 @@ public void processElement(
         return;
       }
 
-      // Need explicit synchronization here
-      synchronized (this) {
-        countState.add(1);
-      }
-
+      countState.add(1);
       String key = c.element().getKey();
       int n = countState.read();
       if (n >= expectedCount.get(key)) {
@@ -152,7 +159,7 @@ public void testPipelineWithAggregation() {
             KV.of("banana", 5L)));
 
     // TODO: remove after SAMZA-2761 fix
-    for (int i = 0; i < 20; i++) {
+    for (int i = 0; i < 50; i++) {
       input.add(KV.of("*", 0L));
     }
 
@@ -168,4 +175,61 @@
     pipeline.run();
   }
+
+  @Test
+  public void testKeyedOutputFutures() {
+    // We test the scenario where two elements of the same key need to be processed in order.
+    final DoFnRunner<KV<String, Integer>, Void> doFnRunner = mock(DoFnRunner.class);
+    final AtomicInteger prev = new AtomicInteger(0);
+    final CountDownLatch latch = new CountDownLatch(1);
+    doAnswer(
+            invocation -> {
+              latch.await();
+              WindowedValue<KV<String, Integer>> wv = invocation.getArgument(0);
+              Integer val = wv.getValue().getValue();
+
+              // Verify the previous element has been fully processed by checking the prev value
+              assertEquals(val - 1, prev.get());
+
+              prev.set(val);
+              return null;
+            })
+        .when(doFnRunner)
+        .processElement(any());
+
+    SamzaPipelineOptions options = PipelineOptionsFactory.as(SamzaPipelineOptions.class);
+    options.setNumThreadsForProcessElement(4);
+
+    final OpEmitter<Void> opEmitter = new OpAdapter.OpEmitterImpl<>();
+    final FutureCollector<Void> futureCollector = new DoFnOp.FutureCollectorImpl<>();
+    futureCollector.prepare();
+
+    final AsyncDoFnRunner<KV<String, Integer>, Void> asyncDoFnRunner =
+        AsyncDoFnRunner.create(doFnRunner, opEmitter, futureCollector, true, options);
+
+    final String appleKey = "apple";
+
+    final WindowedValue<KV<String, Integer>> input1 =
+        WindowedValue.valueInGlobalWindow(KV.of(appleKey, 1));
+
+    final WindowedValue<KV<String, Integer>> input2 =
+        WindowedValue.valueInGlobalWindow(KV.of(appleKey, 2));
+
+    asyncDoFnRunner.processElement(input1);
+    asyncDoFnRunner.processElement(input2);
+    // Resume processing of input1 afterwards
+    latch.countDown();
+
+    // Wait for the futures to be resolved
+    try {
+      futureCollector.finish().toCompletableFuture().get();
+    } catch (Exception e) {
+      // ignore interruption here.
+    }
+
+    // The final val should be the last element's value
+    assertEquals(2, prev.get());
+    // The appleKey entry in the keyedOutputFutures map should be removed
+    assertFalse(asyncDoFnRunner.hasOutputFuturesForKey(appleKey));
+  }
 }
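
The `testKeyedOutputFutures` test above proves ordering by gating the first element on a latch: if per-key ordering were broken, the second element of the same key would overtake the first and trip the assertion. The same gating idea, reduced to a self-contained sketch built on the `KeyedSerialExecutor` from the earlier sketch (again, invented names for illustration, not Beam APIs):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

/** Illustrative sketch of a latch-gated per-key ordering check. */
public class OrderingCheckSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    // From the sketch above; assumed to live in the same package.
    KeyedSerialExecutor keyed = new KeyedSerialExecutor(pool);
    CountDownLatch gate = new CountDownLatch(1);
    AtomicInteger last = new AtomicInteger(0);

    CompletableFuture<Void> first =
        keyed.submit(
            "apple",
            () -> {
              try {
                gate.await(); // block until both tasks have been submitted
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
              }
              last.compareAndSet(0, 1);
            });
    CompletableFuture<Void> second = keyed.submit("apple", () -> last.compareAndSet(1, 2));

    gate.countDown(); // only now may the first task proceed
    CompletableFuture.allOf(first, second).join();

    // If the second task had overtaken the first, last would not be 2 here.
    if (last.get() != 2) {
      throw new AssertionError("per-key ordering was violated: " + last.get());
    }
    pool.shutdown();
  }
}
```
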

From 77f9a1749c186838167dd25970949ad36a99aab0 Mon Sep 17 00:00:00 2001
From: Moritz Mack
Date: Wed, 26 Oct 2022 08:34:44 +0200
Subject: [PATCH 049/115] Remove obsolete code from Spark 3 runner.

---
 .../utils/SerializationDebugger.java          | 115 ------------------
 .../utils/package-info.java                   |  20 ---
 2 files changed, 135 deletions(-)
 delete mode 100644 runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
 delete mode 100644 runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java

diff --git a/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
deleted file mode 100644
index b384b9b9d35d..000000000000
--- a/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/SerializationDebugger.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.beam.runners.spark.structuredstreaming.utils;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.ObjectOutputStream;
-import java.io.OutputStream;
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.List;
-
-/** A {@code SerializationDebugger} for Spark Runner. */
-public class SerializationDebugger {
-
-  public static void testSerialization(Object object, File to) throws IOException {
-    DebuggingObjectOutputStream out = new DebuggingObjectOutputStream(new FileOutputStream(to));
-    try {
-      out.writeObject(object);
-    } catch (Exception e) {
-      throw new RuntimeException("Serialization error. Path to bad object: " + out.getStack(), e);
-    }
-  }
-
-  private static class DebuggingObjectOutputStream extends ObjectOutputStream {
-
-    private static final Field DEPTH_FIELD;
-
-    static {
-      try {
-        DEPTH_FIELD = ObjectOutputStream.class.getDeclaredField("depth");
-        DEPTH_FIELD.setAccessible(true);
-      } catch (NoSuchFieldException e) {
-        throw new AssertionError(e);
-      }
-    }
-
-    final List<Object> stack = new ArrayList<>();
-
-    /**
-     * Indicates whether or not OOS has tried to write an IOException (presumably as the result of a
-     * serialization error) to the stream.
-     */
-    boolean broken = false;
-
-    DebuggingObjectOutputStream(OutputStream out) throws IOException {
-      super(out);
-      enableReplaceObject(true);
-    }
-
-    /** Abuse {@code replaceObject()} as a hook to maintain our stack. */
-    @Override
-    protected Object replaceObject(Object o) {
-      // ObjectOutputStream writes serialization
-      // exceptions to the stream. Ignore
-      // everything after that so we don't lose
-      // the path to a non-serializable object. So
-      // long as the user doesn't write an
-      // IOException as the root object, we're OK.
-      int currentDepth = currentDepth();
-      if (o instanceof IOException && currentDepth == 0) {
-        broken = true;
-      }
-      if (!broken) {
-        truncate(currentDepth);
-        stack.add(o);
-      }
-      return o;
-    }
-
-    private void truncate(int depth) {
-      while (stack.size() > depth) {
-        pop();
-      }
-    }
-
-    private Object pop() {
-      return stack.remove(stack.size() - 1);
-    }
-
-    /** Returns a 0-based depth within the object graph of the current object being serialized. */
-    private int currentDepth() {
-      try {
-        Integer oneBased = ((Integer) DEPTH_FIELD.get(this));
-        return oneBased - 1;
-      } catch (IllegalAccessException e) {
-        throw new AssertionError(e);
-      }
-    }
-
-    /**
-     * Returns the path to the last object serialized. If an exception occurred, this should be the
-     * path to the non-serializable object.
-     */
-    List<Object> getStack() {
-      return stack;
-    }
-  }
-}
diff --git a/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java b/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
deleted file mode 100644
index 3d7da111a9c4..000000000000
--- a/runners/spark/3/src/test/java/org/apache/beam/runners/spark/structuredstreaming/utils/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** Testing utils for spark structured streaming runner. */ -package org.apache.beam.runners.spark.structuredstreaming.utils; From 5c0ff090d62df3d7e146a137d652dc5de48ae43d Mon Sep 17 00:00:00 2001 From: Rebecca Szper <98840847+rszper@users.noreply.github.com> Date: Wed, 26 Oct 2022 07:06:13 -0700 Subject: [PATCH 050/115] Fixing Get Started header link (#23490) * Fixing Get Started header link * updating Get Started header link from top nav bar * Correcting Get Started link in nav bar --- website/www/site/layouts/partials/header.html | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/www/site/layouts/partials/header.html b/website/www/site/layouts/partials/header.html index 328396b35750..76735af60da9 100644 --- a/website/www/site/layouts/partials/header.html +++ b/website/www/site/layouts/partials/header.html @@ -16,7 +16,7 @@ Brand - {{ T "nav-get-started" }} + {{ T "nav-get-started" }} {{ T "nav-documentation" }}