From 5115df2f37dfb1d4560085a9cdc432b9ec61b3ee Mon Sep 17 00:00:00 2001 From: Buchi Reddy B Date: Thu, 2 Jul 2020 22:43:13 -0700 Subject: [PATCH] First commit for the Query Service. --- .circleci/config.yml | 137 +++ .github/CODEOWNERS | 4 + .gitignore | 25 + .snyk | 13 + LICENSE.md | 660 ++++++++++++++ README.md | 2 + build.gradle.kts | 15 + gradle.properties | 5 + gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 58702 bytes gradle/wrapper/gradle-wrapper.properties | 5 + gradlew | 183 ++++ gradlew.bat | 100 +++ helm/.helmignore | 22 + helm/Chart.yaml | 23 + helm/templates/deployment.yaml | 84 ++ helm/templates/logconfig.yaml | 44 + helm/templates/query-service-config.yaml | 31 + helm/templates/service.yaml | 14 + helm/values.yaml | 125 +++ query-service-api/README.md | 12 + query-service-api/build.gradle.kts | 73 ++ .../query/service/util/QueryRequestUtil.java | 191 ++++ .../src/main/proto/query-service.proto | 16 + .../src/main/proto/request.proto | 86 ++ .../src/main/proto/response.proto | 38 + query-service-api/src/main/proto/value.proto | 48 + .../service/util/QueryRequestUtilTest.java | 54 ++ query-service-client/build.gradle.kts | 16 + .../service/client/QueryServiceClient.java | 52 ++ .../service/client/QueryServiceConfig.java | 27 + query-service-impl/README.md | 17 + query-service-impl/build.gradle.kts | 42 + query-service-impl/config.yml | 11 + .../core/query/service/QueryContext.java | 18 + .../core/query/service/QueryCost.java | 32 + .../query/service/QueryResultCollector.java | 15 + .../core/query/service/QueryServiceImpl.java | 108 +++ .../query/service/QueryServiceImplConfig.java | 110 +++ .../core/query/service/RequestAnalyzer.java | 170 ++++ .../core/query/service/RequestHandler.java | 22 + .../query/service/RequestHandlerInfo.java | 34 + .../query/service/RequestHandlerRegistry.java | 34 + .../query/service/RequestHandlerSelector.java | 76 ++ .../service/ResultSetChunkCollector.java | 65 ++ .../query/service/pinot/AdhocPinotQuery.java | 52 ++ 
...DefaultResultSetTypePredicateProvider.java | 15 + .../core/query/service/pinot/Params.java | 102 +++ .../pinot/PinotBasedRequestHandler.java | 281 ++++++ .../service/pinot/PinotClientFactory.java | 101 +++ .../query/service/pinot/PinotColumnSpec.java | 31 + .../service/pinot/PinotMapConverter.java | 91 ++ .../core/query/service/pinot/PinotQuery.java | 61 ++ .../service/pinot/PinotResultAnalyzer.java | 152 ++++ .../core/query/service/pinot/PinotUtils.java | 10 + .../QueryRequestToPinotSQLConverter.java | 365 ++++++++ .../pinot/ResultSetTypePredicateProvider.java | 21 + .../query/service/pinot/ViewDefinition.java | 93 ++ .../service/QueryRequestBuilderUtils.java | 32 + .../service/QueryServiceImplConfigTest.java | 166 ++++ .../query/service/QueryServiceImplTest.java | 332 +++++++ .../pinot/PinotBasedRequestHandlerTest.java | 333 +++++++ .../service/pinot/PinotMapConverterTest.java | 94 ++ .../query/service/pinot/PinotQueryTest.java | 25 + .../pinot/PinotResultAnalyzerTest.java | 156 ++++ .../query/service/pinot/PinotUtilsTest.java | 19 + .../QueryRequestToPinotSQLConverterTest.java | 830 ++++++++++++++++++ .../service/pinot/RequestAnalyzerTest.java | 315 +++++++ .../src/test/resources/application.conf | 66 ++ .../src/test/resources/log4j2.properties | 8 + query-service/build.gradle.kts | 27 + .../query/service/QueryServiceStarter.java | 83 ++ query-service/src/main/resources/banner.txt | 6 + .../resources/configs/common/application.conf | 61 ++ .../src/main/resources/log4j2.properties | 23 + semantic-build-versioning.gradle | 11 + settings.gradle.kts | 18 + 76 files changed, 6839 insertions(+) create mode 100644 .circleci/config.yml create mode 100644 .github/CODEOWNERS create mode 100644 .gitignore create mode 100644 .snyk create mode 100644 LICENSE.md create mode 100644 README.md create mode 100644 build.gradle.kts create mode 100644 gradle.properties create mode 100644 gradle/wrapper/gradle-wrapper.jar create mode 100644 gradle/wrapper/gradle-wrapper.properties 
create mode 100755 gradlew create mode 100644 gradlew.bat create mode 100644 helm/.helmignore create mode 100644 helm/Chart.yaml create mode 100644 helm/templates/deployment.yaml create mode 100644 helm/templates/logconfig.yaml create mode 100644 helm/templates/query-service-config.yaml create mode 100644 helm/templates/service.yaml create mode 100644 helm/values.yaml create mode 100644 query-service-api/README.md create mode 100644 query-service-api/build.gradle.kts create mode 100644 query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java create mode 100644 query-service-api/src/main/proto/query-service.proto create mode 100644 query-service-api/src/main/proto/request.proto create mode 100644 query-service-api/src/main/proto/response.proto create mode 100644 query-service-api/src/main/proto/value.proto create mode 100644 query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java create mode 100644 query-service-client/build.gradle.kts create mode 100644 query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceClient.java create mode 100644 query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceConfig.java create mode 100644 query-service-impl/README.md create mode 100644 query-service-impl/build.gradle.kts create mode 100644 query-service-impl/config.yml create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImpl.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImplConfig.java create mode 100644 
query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestAnalyzer.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandler.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerInfo.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerRegistry.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerSelector.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/ResultSetChunkCollector.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/AdhocPinotQuery.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/Params.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandler.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotClientFactory.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotColumnSpec.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotMapConverter.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotQuery.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotUtils.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java create mode 100644 
query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java create mode 100644 query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java create mode 100644 query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java create mode 100644 query-service-impl/src/test/resources/application.conf create mode 100644 query-service-impl/src/test/resources/log4j2.properties create mode 100644 query-service/build.gradle.kts create mode 100644 query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java create mode 100644 query-service/src/main/resources/banner.txt create mode 100644 query-service/src/main/resources/configs/common/application.conf create mode 100644 query-service/src/main/resources/log4j2.properties create mode 100644 semantic-build-versioning.gradle create mode 100644 
settings.gradle.kts diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 00000000..4a06e420 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,137 @@ +version: 2.1 + +orbs: + codecov: codecov/codecov@1 + snyk: snyk/snyk@0.0.10 + +executors: + gradle_docker: + docker: + - image: cimg/openjdk:11.0 + helm: + docker: + - image: hypertrace/helm-gcs-packager:0.1.1 + +commands: + gradle: + description: 'Run the provided gradle command' + parameters: + args: + type: string + when: + default: "on_success" + type: enum + enum: ["on_fail", "on_success", "always"] + steps: + - run: + name: << parameters.args >> + command: ./gradlew << parameters.args >> --info --max-workers=2 -Dorg.gradle.jvmargs=-Xmx2g -Dorg.gradle.console=plain --continue + when: << parameters.when >> + setup_build_environment: + description: 'Generates cache key from a hash of all gradle files' + steps: + - checkout + - run: + name: Generate cache key + command: find . -type f -name "*.gradle*" -exec shasum {} + | sort > /tmp/checksum.txt && cat /tmp/checksum.txt + - restore_cache: + keys: + - v1-dependencies-{{ checksum "/tmp/checksum.txt" }} + # fallback to using the latest cache if no exact match is found + - v1-dependencies- + populate_and_save_cache: + description: 'Downloads all gradle dependencies and uploads cache for later use' + steps: + - gradle: + args: downloadDependencies + - save_cache: + paths: + - ~/.gradle + key: v1-dependencies-{{ checksum "/tmp/checksum.txt" }} + +jobs: + build: + executor: gradle_docker + steps: + - setup_build_environment + - setup_remote_docker + - populate_and_save_cache + - gradle: + args: build dockerBuildImages + - gradle: + args: jacocoTestReport + - codecov/upload: + flags: unit + publish: + executor: gradle_docker + steps: + - setup_build_environment + - setup_remote_docker + - gradle: + args: :tag -Prelease + - gradle: + args: publish dockerPushImages + - add_ssh_keys: + fingerprints: + - 
'bd:a2:a2:90:46:6c:51:d0:af:8b:1b:c2:d6:e2:f2:e2' + - run: git push origin $(./gradlew -q :printVersion) + validate-charts: + executor: helm + steps: + - checkout + - run: + name: Helm Charts Lint and Template Render + command: | + helm lint --strict ./helm/ + helm template ./helm/ + snyk-scan: + executor: + name: gradle_docker + environment: + GRADLE_OPTS: -Dorg.gradle.workers.max=1 # Snyk doesn't handle parallelism well + steps: + - setup_build_environment + - snyk/scan: + additional-arguments: --all-sub-projects --policy-path=.snyk + + package-charts: + executor: helm + steps: + - checkout + - run: + name: Package and Publish Helm Charts + command: | + CHART_VERSION=$(git describe --abbrev=0) + CHART_NAME=$(awk '/^name:/ {print $2}' ./helm/Chart.yaml) + export GOOGLE_APPLICATION_CREDENTIALS=${HOME}/helm-gcs-key.json + echo ${HELM_GCS_CREDENTIALS} > ${GOOGLE_APPLICATION_CREDENTIALS} + helm repo add helm-gcs ${HELM_GCS_REPOSITORY} + helm package --version ${CHART_VERSION} --app-version ${CHART_VERSION} ./helm/ + helm gcs push ${CHART_NAME}-${CHART_VERSION}.tgz helm-gcs --public --retry + +workflows: + version: 2 + build-and-publish: + jobs: + - build + - validate-charts + - snyk-scan: + context: hypertrace-vulnerability-scanning + - publish: + context: hypertrace-publishing + requires: + - build + - validate-charts + - snyk-scan + filters: + branches: + only: + - master + - package-charts: + context: hypertrace-publishing + requires: + - publish + filters: + branches: + only: + - master diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..99aa59dc --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,4 @@ +# Each line is a file pattern followed by one or more owners. 
+ +# global +* @buchi-busireddy @tim-mwangi @avinashkolluru @inespo diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..b7a3b059 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +.gradle +build/ +cscope.* +.classpath +.project +.svn +target/ +bin/ +*/bin/ +*.iml +.settings/ +out/ +.DS_Store +test-output +*.log +*.patch +*.log.gz +*.code-workspace +.idea/*.xml +.idea/libraries/ +.idea/dictionaries/ +.idea/codeStyles/ +.idea/.name +# Local config to handle using Java 8 vs java 11. +.java-version \ No newline at end of file diff --git a/.snyk b/.snyk new file mode 100644 index 00000000..05716d8d --- /dev/null +++ b/.snyk @@ -0,0 +1,13 @@ +# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities. +version: v1.14.1 +# ignores vulnerabilities until expiry date; change duration by modifying expiry date +ignore: + SNYK-JAVA-LOG4J-572732: + - '*': + reason: no available replacement + expires: 2020-07-31T00:00:00.000Z + SNYK-JAVA-IONETTY-473694: + - '*': + reason: no available replacement + expires: 2020-07-31T00:00:00.000Z +patch: {} diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 00000000..cba6f6a1 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,660 @@ +### GNU AFFERO GENERAL PUBLIC LICENSE + +Version 3, 19 November 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. + + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +### Preamble + +The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + +The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains +free software for all its users. + +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + +Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + +A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + +The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + +An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. 
This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing +under this license. + +The precise terms and conditions for copying, distribution and +modification follow. + +### TERMS AND CONDITIONS + +#### 0. Definitions. + +"This License" refers to version 3 of the GNU Affero General Public +License. + +"Copyright" also means copyright-like laws that apply to other kinds +of works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of +an exact copy. The resulting work is called a "modified version" of +the earlier work or a work "based on" the earlier work. + +A "covered work" means either the unmodified Program or a work based +on the Program. + +To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user +through a computer network, with no transfer of a copy, is not +conveying. 
+ +An interactive user interface displays "Appropriate Legal Notices" to +the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +#### 1. Source Code. + +The "source code" for a work means the preferred form of the work for +making modifications to it. "Object code" means any non-source form of +a work. + +A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can +regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same +work. + +#### 2. Basic Permissions. + +All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, +without conditions so long as your license otherwise remains in force. +You may convey covered works to others for the sole purpose of having +them make modifications exclusively for you, or provide you with +facilities for running those works, provided that you comply with the +terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for +you must do so exclusively on your behalf, under your direction and +control, on terms that prohibit them from making any copies of your +copyrighted material outside their relationship with you. 
+ +Conveying under any other circumstances is permitted solely under the +conditions stated below. Sublicensing is not allowed; section 10 makes +it unnecessary. + +#### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + +No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + +When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such +circumvention is effected by exercising rights under this License with +respect to the covered work, and you disclaim any intention to limit +operation or modification of the work as a means of enforcing, against +the work's users, your or third parties' legal rights to forbid +circumvention of technological measures. + +#### 4. Conveying Verbatim Copies. + +You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + +#### 5. Conveying Modified Source Versions. + +You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these +conditions: + +- a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+- b) The work must carry prominent notices stating that it is + released under this License and any conditions added under + section 7. This requirement modifies the requirement in section 4 + to "keep intact all notices". +- c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. +- d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + +A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + +#### 6. Conveying Non-Source Forms. + +You may convey a covered work in object code form under the terms of +sections 4 and 5, provided that you also convey the machine-readable +Corresponding Source under the terms of this License, in one of these +ways: + +- a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+- b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the Corresponding + Source from a network server at no charge. +- c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. +- d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+- e) Convey the object code using peer-to-peer transmission, + provided you inform other peers where the object code and + Corresponding Source of the work are being offered to the general + public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, +family, or household purposes, or (2) anything designed or sold for +incorporation into a dwelling. In determining whether a product is a +consumer product, doubtful cases shall be resolved in favor of +coverage. For a particular product received by a particular user, +"normally used" refers to a typical or common use of that class of +product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected +to use, the product. A product is a consumer product regardless of +whether the product has substantial commercial, industrial or +non-consumer uses, unless such uses represent the only significant +mode of use of the product. + +"Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to +install and execute modified versions of a covered work in that User +Product from a modified version of its Corresponding Source. The +information must suffice to ensure that the continued functioning of +the modified object code is in no case prevented or interfered with +solely because modification has been made. 
+ +If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + +The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or +updates for a work that has been modified or installed by the +recipient, or for the User Product in which it has been modified or +installed. Access to a network may be denied when the modification +itself materially and adversely affects the operation of the network +or violates the rules and protocols for communication across the +network. + +Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + +#### 7. Additional Terms. + +"Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders +of that material) supplement the terms of this License with terms: + +- a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or +- b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or +- c) Prohibiting misrepresentation of the origin of that material, + or requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or +- d) Limiting the use for publicity purposes of names of licensors + or authors of the material; or +- e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or +- f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions + of it) with contractual assumptions of liability to the recipient, + for any liability that these contractual assumptions directly + impose on those licensors and authors. + +All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; the +above requirements apply either way. + +#### 8. Termination. + +You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + +However, if you cease all violation of this License, then your license +from a particular copyright holder is reinstated (a) provisionally, +unless and until the copyright holder explicitly and finally +terminates your license, and (b) permanently, if the copyright holder +fails to notify you of the violation by some reasonable means prior to +60 days after the cessation. + +Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ +Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + +#### 9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or run +a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + +#### 10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + +#### 11. Patents. + +A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ +If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + +A patent license is "discriminatory" if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on +the non-exercise of one or more of the rights that are specifically +granted under this License. 
You may not convey a covered work if you +are a party to an arrangement with a third party that is in the +business of distributing software, under which you make payment to the +third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties +who would receive the covered work from you, a discriminatory patent +license (a) in connection with copies of the covered work conveyed by +you (or copies made from those copies), or (b) primarily for and in +connection with specific products or compilations that contain the +covered work, unless you entered into that arrangement, or that patent +license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + +#### 12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under +this License and any other pertinent obligations, then as a +consequence you may not convey it at all. For example, if you agree to +terms that obligate you to collect a royalty for further conveying +from those to whom you convey the Program, the only way you could +satisfy both those terms and this License would be to refrain entirely +from conveying the Program. + +#### 13. Remote Network Interaction; Use with the GNU General Public License. 
+ +Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your +version supports such interaction) an opportunity to receive the +Corresponding Source of your version by providing access to the +Corresponding Source from a network server at no charge, through some +standard or customary means of facilitating copying of software. This +Corresponding Source shall include the Corresponding Source for any +work covered by version 3 of the GNU General Public License that is +incorporated pursuant to the following paragraph. + +Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + +#### 14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions +of the GNU Affero General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever +published by the Free Software Foundation. 
+ +If the Program specifies that a proxy can decide which future versions +of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + +Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + +#### 15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT +WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +#### 16. Limitation of Liability. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR +CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT +NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR +LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM +TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +#### 17. Interpretation of Sections 15 and 16. 
+ +If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS + +### How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively state +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as + published by the Free Software Foundation, either version 3 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper +mail. + +If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for +the specific requirements. + +You should also get your employer (if you work as a programmer) or +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. For more information on this, and how to apply and follow +the GNU AGPL, see . diff --git a/README.md b/README.md new file mode 100644 index 00000000..f989bc54 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# query-service +The Query Service interfaces with Apache Pinot Data Store diff --git a/build.gradle.kts b/build.gradle.kts new file mode 100644 index 00000000..e4cf01bc --- /dev/null +++ b/build.gradle.kts @@ -0,0 +1,15 @@ +plugins { + id("org.hypertrace.repository-plugin") version "0.1.2" + id("org.hypertrace.ci-utils-plugin") version "0.1.1" + id("org.hypertrace.publish-plugin") version "0.1.5" apply false + id("org.hypertrace.jacoco-report-plugin") version "0.1.0" apply false +} + +subprojects { + group = "org.hypertrace.core.query.service" + pluginManager.withPlugin("org.hypertrace.publish-plugin") { + configure { + license.set(org.hypertrace.gradle.publishing.License.AGPL_V3) + } + } +} diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 00000000..13e3631b --- /dev/null +++ b/gradle.properties @@ -0,0 +1,5 @@ +org.gradle.parallel=true +org.gradle.daemon=true +org.gradle.caching=true +org.gradle.configureondemand=true + diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..cc4fdc293d0e50b0ad9b65c16e7ddd1db2f6025b GIT binary patch literal 58702 zcma&OV~}W3vL#%;<*Hk@ZQHhO+qTVHwr$(CZQFL$+?np4n10i5zVAmKMC6WrGGd+F zD|4@NHj-D$z)bJV;MYNJ&!D%)v-fQ%q0JG$_z5GVUJTPg0MHPf1TvicY#6DXYBBQ4M`$iC~gA;06+%@0HFQPLj-JXogAJ1j+fRqw^4M` zcW^RxAfl%+w9SiS>QwBUTAfuFAjPXc2DHf6*sr+V+jLQj^m@DQgHTPmAb@F 
z8%GyCfcQkhWWlT31%4$PtV4tV*LI?J#C4orYI~WU(cSR{aEs^ycxY`1>j1po>yDMi zh4W$pMaecV*mCsOsPLxQ#Xc!RXhpXy*p3S2Hl8t}H7x#p5V6G5va4jV;5^S^+>+x&#zzv4!R}wB;)TyU zE_N~}nN>DTG+uZns%_eI=DL1E#<--Sccx30gvMT}^eu`2-u|{qQZ58(rA2aBYE*ZD zm|*12zg*@J$n|tbH%Mp|d|O9W%VT~xG})R=Ld5z<(z%DOO6=MF3Xh-aF%9Hf$?1N9%8Pkev{wun$jZ2 z^i*EhRt8Ve<7`Wyz~iMZDye+XVn}O%qbhV`wHL+%P+n)K&-UMuZw^RRfeQ)%K=k*m zq5l7mf`4K_WkV5B73~MxajljrjGiJqpiV#>0FkyyrB)@HY!;Ln(7JJ*W(>d5#^ubU zVAkTMs*CHzzvUa^nRu0*f-(ek+VZw+@P~}a;;(K=|!9Mhv(~y-mlW);J zb&bB=vySHG`u?j&_6dh^*se*l_B3avjlE|!!Cb0pXyEXRbLy*@WEQ4|)M<`p8Q!rfDJ2RI!u1hPzNjy&)(kcY~GaD6?)7#dCbm`NFh?Y_g$#!+Qrie7%<7P}<-+W@{sxi4JYI{iY zk0(>m$DxOI=~-&eXf2bfh^&(U@o)>(iA1_wJ%B(+nFH+ceib%HEck32QL=J(BNFh`f>St1%llF8chX7#cp*;z}& zcTeXkwsXhf+e;##!FS2yi=2cChcYfzm$wQJ z9%4kAq)wLHf5wfcj!A|xDsAiAOHRzf*)Z-|daN9y5jK-*R{Q0?xaSX-3m|WeuZ`BJ z>eTi@uQ{OGSDIJ#Iu@JPtOy!C?q)g*6SHORg)eAJGh8b-I*X_+xNqZ|OXEsQ-RWte ze`zjjeV9PpE3ac2za+Rs=PA;%QZ>T{x(TRzwWLp_X^2yC-DOEMUy5So!npzL&-@}u z#>uK#&`i&c%J$!bsntEJhY@rF(>6eY;6RoI5Qkn!&<80X5+1(x$T|wR-ad?4N1N^a0)nBj#&EkVvQ?I_+8t*%l#VK&I?uo$ERI1HMu4P2rLMeH%m3 zZ|HA^*O^dA$gb$`Cw;z9?G?m3@nH6TNYJ04Fd-M2wp8@(;vAvJ ztFoni)BLwncQ3@cO*^+6u;(&D<;N;RKb)_NQ_Qu&?@h3MWvo>6FHG%%*smTwj3;dG zQJnT7Wb?4!XmV^>N@ZkA7Jv9kAfD-gCHu2i+!A!}y98SO><8g}t;1JOOxj>#l zM!?y|j5fR3WY2(&_HSGjgMa?Zif<M@d8W z)4>Ptm@zj|xX=bbt$=j}@a_s|xdp6-tRlq6D|xb_;`9oJlkYF1AH%?Pzv$eIAogMi zf(_H*5t({Arfs5XAPj46pjiudQw?dulW-=OUqBVa)OW9E;^R+NDr&LES&m_nmP>Ga zPf)7_&Gn(3v1qu_a^qW9w4#XIEfgiHOQ(LDi=E&(-DcUSfuQE0`ULsRvS}fpS@<)3 z|CbQSi49rU{<4|XU;kiV|C7}Gld$}Yh5YXjg^W$~ovobybuZ^&YwBR^=qP3G=wxhT z?C_5Trbu~95mOoIXUmEOY646_j4ZL)ubCM{qFkl1u*%xs%#18a4!(*b<&edy<8t2w z_zUxWS5fypUp9ue+eswoJSyv*J&=*3;2;q9U?j>n^q?)}c8+}4Ns8oToBJgD;Ug=y zOa0>{VFrLJutjR{PJmm(P9lPzoPi{K!I{l)pGwDy59p-uxHB9I&7zl11lkCu(}*A< zh492AmxsgwEondBpB^{`I*L&Ut40fjM^JS8VdAWQMlwc>_RUM5|Mjes!36DGqW`xs z4tU4`CpOk|vew8!(L}fEvv5&-3#GqZ(#1EZF4ekDQ@y*$tMDEeG?nOUiS-KXG=rAZ zHUDlMo@X&yzo1TdE6b6!s#f{*45V-T3`e2)w5Ra3l>JWf46`v?Y6B&7*1$eS4M(3% 
z9C~G@N@RXm)8~EXL*9IObA+PwD)`%64fON_8}&pqjrg|2LmP{W^<0@W`9s^*i#F}V;E8~`-}(4@R4kz?t(RjA;y-r%s^=)15%C> zbF;NZET~nybEsmUr8sH^Hgq^xc^n$ZP=GcZ!-X-Go7J4nByj8%?aQ`c{88;p15Kf>|0h+5BLkM&@KI-(flp^npO3MC~W@Uyjv* z6Hu!4#(NtZJ0*;_{8^xcLrC4-zK$BVo7S5V=eg?R8P;BOpK3Xwms+Jt-8R6us zf_rUHFYHn~lu!)U$e$#%UBz7d8YS;mq}xx$T1PIi=4={c-_cY6OVc<=){mOVn>~J$ zW*2PB%*40eE^c+d=PP7J@bqIX_h4u6b6#W|ir<;IlR`#s`Q*_Z8Q?*s_&emuu8D;NSiPX9mK?>$CwcbjhCuv zO&u(0)@}8nZe=Fl*0uMri02oYDjs#g$OHCZ6oTXV2Y0TrZ}+o%{%i)OAJBj2xHC|F5o+`Qmq`$`2EaL=uePwq%k<;6S2n=w%_9vj$8NO|{` zTEg*tK8PU#DnQ#dQ2mMJaaL|HV;BCn?eQ%d0vY@S7Pu@7 zsf5u`T=bL7NfyYO?K^PR_|jap@K|qQ zmO8CK+&O3fzgEnp2|_=^K9ln~QhxjgMM>EQqY@k@@#np@FnZq|C{EyEP7^NurUm0q zW5rKmiy%__KE>YItATyMhE({0%ve10la=mUd<^AcB{T_$Y`2_N-x;F#3xTORXvhPZ7psmqhXy?WxxB5w!m*4&Q;?t$4Kt?m_em-htVDxora24&6~5z$MG(RT{trtp(L( zy&VDT{@p9_DGoq+I|abw$E!TyTO7j6dWQ25dqdKV*z3E?n-p|IG42ZUnNok? zY4K{y{27bUT@#|Zcni!tIgjE`j=-0rl(tVlWEn>5x7BJBkt0iw6j^4n1f2i^6ebo; zt^&Yb##}W0$3xhH&Nz*nANYpO$emARR6-FWX;C?(l7+}<97Ay#!y%BI6^st=LaJ>n zu{ORVJ9%`f*oy85MUf@Fek@T_+ML0-0b$lkEE2y8h%#P^X6+cn)IEXa@T7CQ{fV z-{^wJGN*+T!NsAH@VNM3tWG;%y{pVF2m z2*0+i?o40zSKVq_S18#=0RrJIse+;5cv#a`*`wNs+B%Ln8#e0v^I>7a_33h?lHo14 zg)CbDfGMyH2cj%7C`>|Rrg;U?$&y!z(U10>(dHKQsf9*=z)&@9u@w%y+e@*CnUS|E z*O^cQqM*!sD|e!u(yhXPi$Sl<$daf3sq@Iexafxt3F#2R&=cK z!gT-qto{oVdGUIxC0q`tg)B-Zy(pxGx}&svoA}7p=}jb3jEjQ!v6=afKI!2`&M{#tY$~3LR}#G#U2up2L{} zMGSX>Yjg6-^vWgeX0i;Nb0=gQmYa!|r0rRUshm2+z3AlehjfTqRGnRAmGhHY3`R_@ zPh4GAF@=nkRz;xMO3TPh$)9Iq?Fs5B@~)QIntSyeBy^10!ts?9Z@tK&L6xJd9 zNzaaz6zvrtr&MPQ@UD)njFUtFupwB zv+8%r`c@#asm}cKW^*x0%v_k3faHOnRLt7vzVFlqslue32rt(NNXnkS+fMSM&^u)8 zC`p{on>0pf=1id|vzdTnBLB;v%*ta`o_lzj21u+U-cTRXR%sxE%4k<(bU!orfsJ&v z3FLM2UT_*)BJm1^W;Z{0;z^_e=N&QXSO>rdB`*cp>yGnjHJt$ zcJd~52X&k1b<-`2R{bqLm*E(W{=|-)RTB*i$h4TdV12@beTkR&*iJ==ck*QlFiQ52 zBZ|o_LP06C?Sgs3VJ=oZQU0vK6#}f9gHSs)JB7TU2h~}UVe%unJA!URBgJ# zI~26)lGD4yk~ngKRg;(s4f@PccDZaL{Y=%6UKHl&k|M@Zc4vdx-DX4{belQ);URF? 
zyxW+|Ziv}%Y!sFdY@YO))Z|f34L(WjN*v#EfZHn6m)X@;TzQ@wIjl4B_TieZY}qY`mG}3VL{w?; z&O>sZ8)YnW+eLuW@rhClOOCZe2YP@4YWKN?P{c~zFUj*U?OayavPUo!r{uqA1<8h! zs0=rKKlwJYk~34F9$q6fQ&jnw_|@cTn{_kA8sUZ#2(Lb@R$NL*u>08yYGx{p6OeX~ zr7!lwGqMSury(v5=1_9%#*MORl2apGf(MQIQTMN35yE3l`^OS7r;SKS6&v-5q}Gw* zNWI*4OKBD&2YbCr8c{ifn~-9w-v+mV49W+k)$jjU@WA+Aok01SA#X$Sspj}*r52!- zNqOS<0%uMUZeSp+*i1TEO$KGKn7EwzW=s?(b5X^@3s5k*80ns2I2|bTHU+bWZ$x;j z`k@>)1G#JgT=F!8awgol?DqK^S4R*g?e}2rOYRVMUKKxSudO(hOLnnL zQqpxPNouLiQFYJs3?7!9f6!-#Pi83{q3-GgOA|{btKup4fYDu-JFOK~Q1c3KD@fdJ z?uABYOkHA^Fc~l0gTAy4geF<-1UqdS=b=UM6Xi30mPhy1-f^aQh9H(jwFl5w*X`Mh z=Ee5C?038GEqSVTd!67bn9*zQg-r8RIH3$$ zf8vWEBbOc`_0U{b)t)Toa~~<7c-K_=G%*iTW^?6mj9{#)@|# zku9R^IDzbzzERz~fpxFrU*it;-Iu&m!CAtM&$)6^2rMyV4 z$+e!$(e)!UY(Sc9n6hkr^n&cvqy8}NfZz+AQc8fU9lNczlP>5D3qzWoR55YvH94^* z-S%SVQ96pK3|Yo`75D&85)xij9Dl8AO8{J*{_yhs-KtsLXUYqwieO(nfrkB@%|OyI>yF+1G?m7>X&djb(HBNNw3KX;Ma*oMV)cV0xzxmIy+5>yz>l_LLH)VyRnYYce zw$?q!hJzX0TlE0+o5QJDM~sPrjVCN7#|32#rUkc>?-eN6Q0RqQTAl~`&isrQg)ass z+x5XapaYh{Dj`+V096?w)w2!Cnmh?x1WmFC$jEFY4;V)XAl3*tBS)V)3TbL)g46_g zCw9pl^!3OCTOcaEP!?==guEAw;VZ}fE6K-;@qD-Rx~td+j(N>)Wv$_mqFTH_wVZNEEuDG!0T`HXLsf+_E=X3lw4`_&d5&YMl%H733ckO){vZm znFLS`;5J#^`5~unet`V#*Y5In3yb|Ax z|A6b^F37!_z$_{6h{7l~<{u7{Fx*A*#zw{GD)6e}n6f<|)&7`S-txiz3Jm4S5hV&8 zm|Ncc{j_~`^pQ*I#w21;(jwi8GnH4efO;R|r4$tH~i;Bcmp^sP9) zjhJne@yzU&XvFNoc~i(wQ?nE`o6Hk~!;x(%xh7?zvigH2g`!v8L-vEN0DvV3?m( zSW(TZ%2AWf`rS}GGMqUj!8yCp#|fR--Vxfj=9}YD97Gocdj=S z0zkF-jsO>EcPTB1zRO$++k^bH%O`=UkHdHT^5?{$)ot<-K2XIE7js*4OjF)BsVjCJ z*KN)!FdM*sh=fB$p8*EzZmGJp?B_=a-90$FI{S$LLjBU$(lxUj;9 zIBszmA*129W+YE;Yy{J~3uyOr<2A(`*cu0IJN#tmUfz2jIWQi_h)_-V6o+5CjbX!1$lz6?QYU za&|O#F%~hmGUhil{M+J|*0<3&{a1%ONp-^!Qx*LOTYY}L!r9BbTxCjHMuUR0E(uH` z!b$*ZMdnB{b2vsb<&P6})+%O=%a8@~$fjbtfF@Z>^Q@enTOJ%VT)Rdc!wX|@iq9i}HaFZAeY6g8xGZY7h-r1sy_<#YU6}I?L zwvf0ePE5PKbK>2RiJOFO5xNhMY+kt`Qi?Oxo&@xH$<^Q;Nb(&rjPBAcv;XtmSY90z z;oIFFl%lDq$o&kYQ;aSHZHD@W({Y1hw<-I>7f_X8wc?%hNDlo~Ig;63RlHNhw~#R3 
zA*f5D_Qo`4_ajY4Gr{mLs*(Fxh(U%oua_u3r%`H!TI)@R!!iqV8IOhIOzI@=7QJ=G zV$(9mEVL(7DvPn0j%_cOZN|vvNg8*PHma`6+oS;PDz%iOFyo0n0e%$<#A3r~$=I0T zDL*{AREUGx&C2}?I9cVL`UcPyawTqA4j-4%Mr-4`9#8GX1jiJkKGpHVr1~Rj#zFaZ zqmE!<|1JCi!LDG?1^Ys62xz(p;Uu!QZB7!C0#piy1_9=e?^s@-sd1gs!h$;Q`TNtf z3N4Elsgl#={#U`~&}FNvH78MLjjavl1x*4pNVr338>%sfHu>bxo2#eZN2ee9q#*Jg zDk_=OBR;8t6=pBN0aj)&Nj}pzqqUYW(tfk?bXTdKbNQFSUMCyN-!b0#3?Z;ijzx$M z^Eo6Eq*NO!Y8K;84H4MHj_xwBYc|3>+D(PFj7ejhECG@5@Pk&8dG<)HwwO2~j7KV6 z0$s}=*D;ek#8$a*sxVlC_`qFkM0%BQQ@v2H&Aq@G9XCQt^^x<8w*=MbZV)@aPrrn; z`6r*&f`x&1lp)`5>-|-4%l&W4jy~LydfN;iq?Y8Xx>Sh#2Lx@FXo|5{WKp@y-x;)7 zl;;_Y*-Nu3pcH-)p0(tP~3xO_u~>HpCdEfgyq7V-!ZZ{?`6v_b-vx< zuu|gm5mG6c@D{FYMLuzvG+A2T&6&`n>XM%s`+Qtj)5XdpyFOnz3KLSCOxaCEUl()M z3b~FYqA3FT1#SY{p36h%M^gBQpB2QzEdtM9hMBMRMu{|rf}(;S85&|A!|Aj}?fMKaju!y>_AS}#hRe_!&%8V=6+oPPtE zOOJ-Rcrf>hNq@lG{{@$H?6ikt@!A2OePLe{MBIWSPz7{u(I} z$PXzD;leHG?Xl0FnWt+Wrkrk*|e3P~YVF@N$y&L929cc=#-!*k)HZKDo8!#+t|?9p0z1KSDKclB&M6~hN5<9~^DIltXKR$+iK*h9k$|@Qoy9H}PSI;b(v>w`8(k70@sfa4nRweeiwZ-syP3zPSsyK_8Te9*(FQdm+ z84ZDah4PGehH72w=Q8bx;pK5juT67rJKb|ovD#COI^l6z0eBidn$!Y?T2;5sN+vTV z$`%Edb<%-Oq@NPZy<2Z3m;$}!9JzIuVK6;fJi>>m3q!Lr!2xXRq+l0LvZIR_PNYrP57E#sCvD^4UU2GVr*Rx`QcT}yQanF z3i~!-2Vkk4S%4Hd2baDvrM2g(&1jZaA1!vLi!I#5wX6g^&PE`0-TovM(%wuaPXAno z`a&j{ai=TsgKpc1C3|)tY#!4>SPBbMnchi}glCBwaNE(4`gi}JY0;`|m`s{HtaP@& zHxwCt#2&z9A7O+=v>za}LW~}G>_tWo$dsRX)f1L=+tZF5E&RBA#jUC|N9ZPa_&z5= zekCOsIfOh`p(&S8dnkE~9#(;BAh8qzi5JYT0nP7x&Hga3v`XFdRN|$5Ry#mq*AN$J zV)l~LSq}2d{EJ@%{TLnkRVn*sdM{_b|4!x73|Ux9{%S;FPyhfZ{xg;P2ZmMuA*cMG zipYNeI7{u98`22!_phwRk|lyX#49r%Lq1aZAabxs6MP79J3Kxh0z1E>MzLS6Ee5u+ z@od~O#6yMa;R}eI*a|ZB$ar0BT`%X4+kyxqW4s+D3rV176EAsfS**6-swZ9OIPRZ& zlmIH>ppe;l28`Kd0z(alw^r<%RlDpI6hv)6Gs?GIpffKApgx^)2-6jAzjZE0BtPBC z0z8!#C5AP${zTF$-Z^v%^ie8LI*rvR+*xc=>fa;`SRUSLAio?qL;jVFV1Bw4K>D+i zyEQ}vyG2HTx>W?Ul&MhxUXK7n;yfN)QS`foM!4>4-(PGwxW!^^UyKOz(v+1BejI*& zQSkV|m5=JF4T0k*+|h|3dx`ZKBVX7H4{5iakAxnD#J=9igW@LS;HE_8$lZy1l|$wX zn<8-$u=7&li+^MB(1y~Mz7lj7?oYf%1k{wT#?(Mep094qqnPv7*OYkQ#7$pkU5U24 
zzPLEwAb<VIp_uUE~+r5)jt(>>Bg48_{)twH$QJDSBrUS!j{lX z)SK$6dfLWt)c9%Cml+sRp*OHXB?e4hbYZQo!@=6 zBPTpi&6&atD*#Cn6f@5<>79Mq7o0^E!NH)bD26g}?@qg%*AYeE6Tec@F?y9Q8i}^s zz`)l`8>;h75!kL!`&*_hsX1%2)(lWr|7!}@gn%MfwY8vN0=pMm3WesCRv5e*5m4z|u(zbYCpuxO9$bY)hkL|}mRj{3dlRgNK)#PJp#vR=ka^TZ(tKVI<>M~ekIfd2 zm3UDUNW*ZvS5L|SF334|YD>LJk(EqgPpVxtzwclUNaH70zWDVt^1+cz|F?RdF4HHn z@4~Gs`lj!0dWi2n#>7C@B$Qf7|t{1!3mtrO1H7 zi{=I#^Oa1jJiFI!j>PualW+ncHJ)TelW$bv2MqUG1xK7R z%TsQfTn)7D3}XYU+{?Hq!I&fqi4>DmryMiO?!aN!T4fnwq2vsuB^s6fPW@u*h-JwG zNniJFR(RI*?5HV=tqO)lv}CRv_eNEBR%z}Vnftv0+DUH^OCODH#&;{+aw^1vR z-c~|Mk+o?j-^Z+rR4s z-gNA5guTuab7N`{Y@eT&)!xF8#AeetvQ6d!W4BlO;0#0TxS_( zMm-A-u+h7-PjmOQHlh{Hxn+J$jh?uEtc8RG8tu->og@ z86A%eUt+P8E3oLXIrq#K(nCF@L12>=DVT3ec6Vn=B^B;>D=O%op+0BT;T)FHZ`I93 z^5|bpJC_kB92`alM40Am>Yz5o1gxkIGRYQ)x^+R|TCK)r;Qyq6+~S9Uy9nr^nkvc- zxw~#_9eBBJcZNK0yFZxUK4h>u$8;4k-KpNTblRgS(y&u~u&J;O!aqAMYJp+(BED*d z^I#F7vPOEADj}Pziprs=a{%qgz#eso$j`At7pN~bDw%&ba-+4pI}T*?w-z^_~DfD~Z3Tg+#M#u{s&uRF^dr5RFZh7<|WNEG;P z-_SzXTbHc^yD$r;WJqqJkA7^(zN`nzQ5V16nG~Zobuy)a)(T@Ik>V!qOfw;e z)?AZXjzDJg%BkIEY&bm&BczLuWY~k}3Zyx#)jxg1A9R`sz!_dCb!|13b*3PiA@(E6 z9HmG2R>-YrW93UMQO}XE4loI(*er9J*wDUd1se!pzdpoB_v6^lQl}+!6e5MS`+bU#_b*a5Pkt;o+lOV4loyn2P z$3;z-cX>$R{6M4q%b}aMBF}6N+0RCE70bB;XwHV~JLO&!EB)Cgo9ta_>>Os1HNfaY z4PNu7BGhw`6}cm>glh6i^)Ja{rpLHix?C?u;(e&GI{?!E7$9hd*5c^iL?;6Kwn z@qbBE|3UMF|F$Ok>7YY?CeMzMes@CZJQ?&|R8v5M@XvW}jjxhjl`gzl;rvy6Nn9$K z;1TKGpUgZs`vR!t-sD~2ar{58-;2k`H(MIWr_cujtSCpjue(R z(a7R{q`G+;8qD8D1e?1zWv+pPFtk=k#>f`yqZo)3KwCBgABgQbq%hu4q}h+Bdyh?* z#Rlr*$38^Ru%m9FUTQL2Xy^j|f%*4H*{zWFRsMbs6@u{JM{48fq;F;QFV%6Dn!6X0 zEAr2G{RmY8;Jlmws#%7Hl_TvQMbLnN0KGK=9)1u=Vb&#V27UwM#U+)$hn#hlXxBxO zM~<3s(W;fe-0%mVWtZ)oN|h-01@5z=u(z!V>)I9-IepH|_q6NR_DA>2hxGKt-QX;H6(^FXwcBndi1s%qn2sH-rsuON7*ARP6Qt$2XIy3d#cn8sLh&7#USTFn3 zQm-o6-Bnofon2V;oq-v1@Ye@NuH$Z~+th}Cs>F7=H#=4PKLp%-!EwR&0`a}XL=br< zF>&?HNr}9ahB-EA7a({^_6`taBwmB~hJG)p>8r^vq0J_+o`sOq<{s2~2t}W&1f5`l zj;E0nmt?YRp{ONhti9{4&rvt5uoS0CO@%+Yv>+}ROQAGP3VLu^S4fe{ZRoGviEXMF 
zhM=I=Eg2~^5PIwEq{~Wt?inz13!axZU3knx_)Ey9<)z<=!TnCPHvs1l^spF`@INYQ zY|J1RWri-^D9mVY5Z{u+bXg#}3rUwSXX>&@PN+017W@!L5H8CvZf0wZxQ=UrHJ{Um z$Z;~3t6ARGql*O1^YY(h4awy!h_brE6&k9B&5l;ya>jDyW5?o$q~=1iV!t7#8&QOx6P zhQIm55sij*Ef-G_?k^$AjK2j?=QQ?^=r{MDaGZ7`Yo*Kp1uoZ=&5|O)D#xAHL)n9_l6-E!b zVV@8ny;`XU#X2((4cTmv5unmYzUmJ>Hm+Kvht&a+j3nr!sljTHUZn^0w@L|WKw2TO zRO>T!>jutIzNI5U_KL}vd00oi6$aJqPeJwq)lIr(2Gt#52i@sqCFaWC)pS$pYoRCK zd*$)r6FCClYp+n>gCqVF>x)ghAbl+h${~Mc_sQGk@+sR@b(88l zcx?*Usr}v|kV!RPfS%HK>Bn{7tdEV$CB5Z@=uy4>^(o(%@R|_7dq69s1(X_8szPZ! zSS~$LCX>-}F=io=YcY~9!vqo3&dh9_Mosio`zO6i|$&p;-9%+~sdYNrVE?Q8rS+eHx z4O$l|b3FUT#2jb(WU<`oKAjGQUsoCgE1(c>3byBNPhKeJ7f4S-hBRqRyePY)im;>H z)hyFuFTDqx*ZgXo$hn+u>TGs~=Bjqr3bhPmXG)v8){EU;N*58NKU5;EIZl z9%|JomX+b6M#jS2`B%~!+`EStMD{|y^P=`xPbD$o6;|!((h!+y%7Y{DuC!NCKDIN1 zER-J?vZ$2el4y~!-0vWjNRoC|ARB`IX@M&;?ZpULcAIu`zlH9 z&JK#H);Ij~fqoT{59}OI#ViA%!lPYyd@kHg*hyI;iMdCtw2&eLHOd1*N%2Y!BG*H_ zu@E?VbtZlI{7B{C>A^b3njh=KdF!=rQ!)oIjwkP{t^I{2q&emQ-C1&U&fPC_viACTbT;(A3qRJeGINz^!0N26vQ~o|#pmjp-Zq46%+{X9n zLGKqhLh4`-(*oDHqHU~-45_+pe(BICF$*0jD&FW?ED=vn=t?p9X(%AH9+;6NcJ8JF zASkf}LfT7Z3u*#i$ml`gKIS>3jrTla--x##EDM{w{>Iu9qV!x95ECU*W_O`q>hcCa zswU!;H3R{}(A6aQ(B)lImTF$BzF;$V_?It*+8ZeiZa|b8n_DN4jUfI0jIA6Q6*c0f(uq~DxrNm!$~G=Uz=qP*)?qc(}|7MQZT&B=Um zr{Lj_R7QJAlwD=CoYpjQsUyu1)C9p5CE)%3nb)~WtP;@6(qGG`*qDT zS(zM>&R<;Z23V|80%3s!`0QpTt0Ay;*xLJeE|DP5@x?a!1)`g= z-1}G_LxiiO(*?R*{(yH#&yl|Seyx6*+ETayQtv7Htk3WPvI;U!@h-e$)gw9>pyKmB zk8#$3BF-ou%=`9_3)Q`0ttk$cymvULFS`Khmjes=2(-QY@eVjJ)rSD)z)1No&o+dz zrGItPZ$QuD;Nqt~U{J?9VlM0g{kx!4$?!?=o?um>#7tjMzrLfv<@pI&cp*5H>XPPZ zu8Xh&6y7v0pGDiQqd-~tBjK%-SO8$8kG&44|{09|FO5BoNkV6~JX>g{b#NHJW?gmM# zhbcS|M9fDc44(seG%$hK#va#4YL98mddGDi2qr;@CeiWO!!`DrF<%=_^*3JgoZiSj zdEv30G5`7ex`XP4#6cG;AQ}(|>CcCTGiom^pc*j-Mz1_oGp4iP*>N125YeWCw#L4H z*>u2Ih8jVRJ?rOj-7KbU7KXpYs2UZf)Vf}(lsM(oiB>tgqX2tILJitw_x z&7gq;`b}qrL{lEA3DaXDOi~HQ!^?xxjjVW|#Z+Ek&GKA2dYgO@zB2V*eY zx>@D06X)(FUz3xz99V3v*k7x|wxiFxv>=N$1Chfp>CErJq)gnf=P!u-QKrYnulzdQ 
zP56u!AH2^QVnuxTJjcQtlflq>PSm4C!$^fv4V_XsIO2d=O8|J`4bUDtjBchJ!14~3 z#mgUPYF*Z?k;Y)Igdx3yQg8L)M=c%}p3!P-0KOuXI+{*LXJ&w)$gzxeTyr`)h-Nc! z`$xa<>T2pbuU0VR?#FPEM44XDRw+cM6U1R2aLQpGHX40=4Er=lp&2aN#P1IA3|r+L z?5jaRyCgN)b(KuS+(x9rPLLjY&4^YY{0T2Ai%`f0p}sG*R!}{DSf7GdPJ=C2MT1ND zUJ@#y06`CNc9n?13R2KY1K*SYeV87wG%bjcIbn+AR8*FS<{?wWomTT5@`}~z3bFAJ zLR-wmE$iwwJ-TnVEhl{{?+??DJ?DWk~VaX-L3-RLtprT2%z-GfD{UVBR~T}zymA0 z6VZ;1Qr%5q#+Oz#3)`D(%WVWWS4BW6%ZvAtt!u25FO@e{X`)_LH>p&pFzx(wvNEO- z!2$Z}`iynmY2j&UCmRNB)9Cn3MXRls&PFVHzkzr;)B^BCMY~6lYY>0rsKT zm4}RV`Q7tbn)Aseay%@-I6ZT~PBsO?D|>kG*%(PGo=|gZ#0zsmE})xxtAvaCe&$1? z(7GyH&^jm!cguuMo@CPA&-lrdE&Aq8GIOuUK9jt{K0ldcvJJp7I`ZMx-EYj$)hl~) zFM!U~HxgO+lb$1cIK-nvz<5OPs(@d4tB6DUa3?-bJ98|dv-kIdtMS;9BuLc{a~_wW zO$u`rNymsAeMH9zh(|w=<*V z&&B{&O0Am`<$iBa)>pNZ6cO`d^3B5%=gmsH(HYZw6!U(c@}#)19F}`BT+yOfamJY$ zYOmy2m^k+ADH2klhAJMLq;6>t3)NREUgk*cjJHg{NBkVhDORNK;v5362&NN=y*Ef- z$vxYTG5Ga{SI&C93^Gsu9G-osqbC9PbsC&@xxGlF?o{!rs9|YpEE?P8ix#yS`7JUy z%ez(_Q%I^RwPrW%rFF(+mE}rp#Wtg@^>O7T(@LFA7j{LNrL=XGDyB-|3<*mqLL_UA zUZz?ulF$5O59-WWZ!d@hRxC@4d6?okW%`1$#<5w9eh>4Cyr#xe5%VPG@TBe#HA^O} z1&q{T_TMTr($f<()ah%TXapiGp}`MAC7>0I=Cx*t+bXy+gMyk*#(A~ft=&4YBdQki zQ}I=c;etc@sD4?l`eYaksPtJnx5OUaZ6u;7p64DUuI`omrWjht5$8+cqb6Hw75WNX z@D(fl7tDl2H)H%QYyX3>cL0*DZPv8+ZgaP7+t_W}wr$(CZQHhO+qUig`^@>y%s1~j z6Y)pXii(P=SQS<4iS=aOnR(rqe#b*BR~GN+bMNQSnhcMHxhVf6D7_zYs}@oo$eK9sZig1_lH0|C z&<1W;8dh6lutS+|02t0VqRfh9R+%!~9YsQ>cw-uGi!YMSo?19?Sty(u{GRqmTx8Zv zLz|nph}CNn+4a~dDzMog(j+NForDvDjLwub!b;p@dLHSBO0kjaI0CPZ)8B2(HNL&A zdr8Pw@u(POF1J*groJ~!1|E(GmnR3L6`P*3C;v?R zDw-pBC=u%}<}P_);mn-_cE}am&b1_WlqnWVzFS;*NhwoOb%+#0nI|H*Bw6_0R(=Kj z;7@eEqYkW2OvWkoz|yY1gZAJw8=>KShthS*ANzYdDT61^AK)>0H%LV4q3}hw?bkA$ zF$tz;<5T59v0Zd$)unmJ{vu_7eGDP6+pe(H&n^3E)g^rB?pn?GT9l1gztAUpR*+Kvt=FE~M zq5rZM&9v>ww1mzrK)vx*0;;?tnqA@Q;FBC@$2~=gy#jW$bAJUNIl_YpT)``*9nnkV zF!&XBK8(PeQfnScH*JaYqy{1bN4MwF=&g2)`!Kuo165*d^1Sc_d{I4>6V=>74c%g4 zXE_M`b@syq%jQx9VRp@ba!rY|MRhr!S3bN!1RT}^I(2gXE`KT57Y;maGA&dHM#`4* zy%@6YB0A6Z^?fg!$4Gq0auM47(jE$Y4osH 
zhydBwQ-S~vMS7)hg;AC=MRf~AHZu|Ue*bk=ff`!Ol1%=|W-a+~l)QH04q^oeMZHj~ z8$8jQn(n1#O!_7sg1hi;{v%?nd&gK7tfN3I{A0j zcg`ISk^Ir4G=(SvV$v}DE(nE+%rgFkT%cu5VR0Qa^H4-xPC*7Y*+E8#xvyepS#xYE+FyIIi0|5$J%mKAB58%MgleT%Zx42e^L`TdA~Ips z=NvgHNpYZju?*J>oNcmd^(nFUc+-bu4*+9)qIwU^g?1_4-&-`uZm&f7F^1?@3IvJc{gnlh?no$E9jFIfJ8i+33;o-!b2hD@}}{o}J4{l{44v z3Cd{3Lj%9^E43SBXmIvwsA2_8sXgRu=4=H{j9R(fYcCzOXriTZ51l+HcXr@)^?rK* zmc89=w8MW+txdobBh`X4rMvY#vuv0GIEO67sgL}mIw$pNW6s8Fd=t z@58{pFs^Oz&g}CPr8EL~QyUjk&}1qyO4;-6m0MRd4J9T2r5_j+YdeKP%Q+jnWNdV| zUJLU&d%m|g&3B83R^8K^WM{0at+=9UdVAzTnL+CqdcT#($38|-fQ|BJbHY4vk=ANj zvX?ek_oYp6t8bQz-T){|-5OGrv`IGd?>X*h(s{MvQ{j>fZbx<^-)&(j8(N+z^sftB z;V$0+Wd0oUR^&)Q+2bHfLt#V~jZT$UPUbkd#vD#zZJ&huG+-;T%sU~ONA?a`Va|T%I0yd%0*Xr3>p#slVg7Y<6o&Bx856S zg;7Q>mCFF?xq_m}VG5`(0fIX(V=yvQ;xjpwNhrLFMui8xdBw2aFOvI3t6-NG3%+d= z>1un%A{1+tFrn2nu2%`-hiqYhXDga3%{ZVkC@ROtTcA;g*E@K4i_G1&^P#Pl_9*m& zwBVKqZhrf4bhw@M)78cm zBMB!;A)H{6h6AjEv&|DGxYRmY|e_ARf_dMIvm*-i4hR#IU_#A_QYP@L|sHs zo@Ky_Bx6e2??_k;7vjibD#pM*T7`h9V&s(moOn_x^N|9{gkOtFY~gDqSo+7meUjBR zK2jiOsA%PwD|1*KC^m(-WZ5j2AWi;81kCi5t)KouHKt|R6m{m!!n|4YN3yyBo0mSZ zN^yj9>I9Y6dI&$!T7&$%3Ccxua0-&DoNJFbCV%1;h^-U&1Q+@47qrKld+QNGOrh{a z27PfD|L06XuL1+ZMc{_7rB7bd&WD%*lbypj>|K|<#2#t+qPXH zTm`5QC)ktLW5+G&4lhvX8DgOK)|mvQ_b^HuJ&=wP%Z6%;E+Bx|#|Q}vOoGR(jK}sD zk9x4A-V%Hs#G>J5XldT-W&|Kv(!mEi;J38jdK>L|Q7~<_no&|~Fdc~yhC~%VqQc2e z2|pva(YaxgaE`xa5=u=WkhtI|f`XRHhA6|>1`)hDgYzt9kByS$l*OQ2O-a#Iq%SLz zV^&-mn{^KrM6&BueyiV}>&)9rr)de2+DkV8##PSmko(<`nqPVr^n_V~UoIi`_yVdB zzcj4`b5QijKNrR%0AYi<`{NDb!y1^#Pv|K2N8<&wlO7-JDa5Yp?eM)pf>PbMq@)Wr zvki0Y1yLr2WfDb`RBPgq^VC(KH;ofR#9^i$TaMi9J6p5TP5F8<&ofnvL|`*(;urRO z?0k?7WiOd&^v);ux~R9Hznc3moOxE+O$lYV0Ku|hENFV~?Lt!QZlMNp1%d#^Rv!pC zfq`*V)n<`Io8N2XGBOjLYB}#{g#>o-?Hmb6$VyvSN@nI?3{y-pdNvcYe%&%CIeh?s zWfdM@$o~R)P|M>ElHW0BAMI=ozdH-Fle#Dvq-bpmPg-!rDY|1*o|1dvDh9{`{gt%n zFemDyrWMrywXJ+rV5r%UR~0T*75`i&rM4=%7}ulJyHu{rZw;C$r+nn@cLyLgh0d-A z(3SS5tW>ZK0in8bOH$vW>HIcipgUXYGUq49#>Ixff27cCfWz$0vR4Dmq}CBw<~4Sh 
zDe9adM$vVItE_)3FJT5Bgk}V=1g+Qvf5+hpxwh78gHe$<|r1^Nh?B&_~xSq+nVdY+~dc4GJ?e5EpV zXs-H~6poV`Kh5kok2qSUMD?0&WXKs7T0?Z-J8zti^WD-*_fo zhAqM(p+l2*(|b>aZC+?aK~^_VCZkP0>}TxdEC-KcmAx*YS?wTK?cW>PjS+NxM==Wg zg}e_*NcH%2(J=+WVL+;P)kz0c@48^4ZuemowCO=rriJFSD|#7D2oO{}$kCbL0#0%2 zQe&D2wwJ3%d|+L`bE=&9k_~(BOe$ZFap$YMGL$&$D0=mJ9n%He#RRlC3f=|WyrI0L zA_qS=kzzw8f_QiJYg_b?xA6UgBS0tT_Y$!9>(J-Q|m=O+8+wIPlb5i=-aU~kBf=4dD zd6Q8*EoKqRCcMNO5q%nez-osz1XT6PZ+r7r7A_{!vpDIfE$$yCUU66H>HOUO>u7aE zs*>|KS24COy<^3O^xXssCI`2iF%;A&7{j1UDk9dvv< zsUbj2HMoFr%{j!bRrmyt%jM|4UKza#}%Vf*_fEvi$*6J-h}oRdsdinr_W1-)p24zB*p9tfDdUa27+yi5W`#8+~eE_NyvNZgCP48jF8P; zgYS#IP!@sLe^SeCy4jwre}sC*A4Vk3|EzFISR4QEai+j{bL%-B#Nlt4WJN3eh+Uo) zVtaBF&A%PtbaaH`A~$h0I(5#|WARn>4Hbxy+Jn-$LdJWL+&({?oGdxCC?@gw`D44O zZ)fV$Yi@4u-zGU|!cfh6Eq?2C3Nn%TL2ZoA1+5g5O#q6$QGS|1C!;H{)PU?dDlSGU zLGKxOa;zm!C-Zghet4U7l(%LaEQnKF+>ECNt@`F07q-JO?%%X~*k}Yndc#f*iq0`hgW#iOvymYI0Ur}T;8qZ+%f1paM#v7e! zUS~+CMQqEbYZ%Ix+4iKAGa>>DLya7d_5zQo_zm&bP6F_75Qk^L7A%?p74r#_+3V6R z@m)%h$SZlQi)PpLLYyya^FulLkrPuM%+!YnWBCX|f#M*ph-`6S5IH3F;Os;ZZ&cDq z<~WF?be7SQre3OHq63A%t27ee4>e--Q*N)lFkAI_P@Yoq?Bd0s)IIqLY)xtXU`k>x zfQK0;b2n0v{oPhQju4$`uD>)Syw=X_l}YEfVF8)awhULL-sJNdq;z8~(wyAEW&sDx zxqHk8ufaTXHNnIUP~eE&k>D!g#IVt73wHY+ugJwtuy74u* z1qC32jRV4EWbz*0B5d5qGm7FB;V0Z>C63g4n6hW?!BfHU=hqZbuGx&ccdij#|lWok>4#{m^Fy>{`JdOS zjIM(Tuf4sYrJltP%2vW!U)Mt5hd5_vs^{onYW=T{?nF6taSUF>uPLMY@>8Y#vd&fU zJg$MqI>EOkIj}Gpu%?+k{%zvX7zqvMeuMm%YD6eLoHxL?e6eW>J~|~Z&lHB^r_Ag0 z{*SlMeG(r}i;4UY6e1TDhAnY@tyh=*e7>7?vlwq>&py69o*=hIE389P!iE)Fe1v;HN5fVGS&&jBzQk*Q}Rb%{FF5H zt;vL@*J)TU^_AGy%>+&9)+R@9XQHe9%Cr#w>Q$NM0~WAiktZl>9`I-Ypc0UjVU1rn z_FPNg@88w2iz;NHBJ8)vM$%1oe7QzSs;NxSieG5h->Cq6`M#YqU;tx=1hYym@h%fi zzWLOcEgsbZ>jW|mkR)qpxv-Z}J6iTzy?L3sZiv!nbZ3a;A~Hu3j6-^%FcrouBW^*9 zwOO;eD$2J8edza=ZDF&}5X#=B9O(;A4zyM&5yTvxuoqjP+FZY!ZYI`_D=;czTJF-e z1-$=(BE%9~*+c%p5UT&+n27&>tc8D77L`o(F_e)w^~KRuv4^AdNE-D~2I(p(SCPRP zc{V^gm}JdYd(~~{max0nhdPp5j3){eJ z$LuzR9V>9)451K&?27Aps3vsd_bU(1EDOA~g;@vOO2Ty`4MFO9u=`!_wEKPQp>9L& 
zzuUbCBGHhsuxYBy-^Uw`)=n5pSF5)!a6qfH$^u&=0GA(}B-Ixjj|ce?Bp(~$q^7BqWU|H8 zKU!?5P@+8*_63=^7)|h<=`vW)2%PZF(`Q0Lr0x5QLjWKIQZB9)OOB_ISy!Mx`E{lJ z1=1d&Ic*{{_h#6sNH^Hz)~vB7gCTbuUkVrOm(pCye57-0NUsKiFMeA#@NBB+F5<+s{(H7mQAPQx`OR z8xRz&uf&f&-?8paW&Q%EHCq$Lv~}lCIW%s>Wxj&$Majn9D~*{Yn8jBZ3b9-fuz!82Hn?&ZI2_JZYAy$kb_?7m*?J z7EcrbL2*)gJ(Wl`yg~c)vC1w>dR$LezB90-T0%EZo|KuQOirNpKJAd) zr+w2F#9m@j64vevMEx_$M}ESx!oajKsI7|Q#c-fWRsS7nAgMlxf$l`eoBx6_u1LP` z5wVEEAYNPN*iXKJza7=aP+z_r$z;5})SQGWl0SrU7qL5T>MpzjZPVq~an6pv29s{gIn1Rh z$*Vp>0p=05JN|HRiyOCbpgpZ@;9Xj|o3DNV!%Xn6t3hE>(=2$dFuEx{osGXYv`m73 z@j>86*-gsSS^3mR)HB6Bj1fy+E{@9e{bcRLU_iAqDzdQUqG)+sqNE`h1 z$3w4loJ+!{F4NdK!E7Vu6L}j5d=VnffP!j5b(b5(u}{;?o9PB`YLsrEsOeE8IUM8F zj!}~kYF^$l^i7CS$AnS+a4#EnWySE!?hNnzWe>=ETyc4WCXpNzZ9R&vLWR9n2)aFS zeT`FE>ZzLpjPr*qdk%A3<`U8cpr3K~?abpqM})l-j}Hz+9tJcw;_-BzCtzpYoNVk^ zd4xI@9~_|+Y_6S*Kx+?A$c)OqC718Wiat0Sl%qFMhix0?j{gw1XO9$zQhjjoeDj|S z8hS*$R7Ol=9=Sd-9s*OgZAC1sMC*(iexn}3CMYJdNZu8^S5)5@Bxo7ayS4fG2D@ns z(Y9t_4DB(20CAx~=eL=RM?RRc4|4V{?Qe z=>g3K7H^2nxwHm|*N+zhk9ET-=0ak5wZAxM<)DFY7|^q+@a_=>AXMj@vZG11mH%nQ zn9XfRt7)!V&u0~v+`DaED;5~WX_cQ6~@iQ$)`#bKdk&+uvYtZMGQ??&zRmpw zbc5donS&q;jPQE_7rh5{ONJKBM;cxKH>r!f)K=VDf}bfc1B4Nv3C}__D{B|kU4Q04E((6!W^q+&Xb=m`c#S!$wEEp4py_0 zDJO?v%A16hzF;#-Lt+DUyec?VXUS?%21=wBiJ<}TTQMa&n$+5wnHr4sni_Hb`tFO; z((Kg?Xh0p)JZnUc=-mE(Ls`z5)+Qr8;F0R92sj9yEJx1kK&wQ8S2S`)h+Qk?^jShBw0n z^g^Pht7xCZvs&|5W95{bypf4acXhX`O_>*QyEk183j48^Ws>JcasVrhs5G9;&2dyi z%>jCf;J1W^x5i(=Cvt|^PAWSdNG}XTJ@;UD+R!_#xn5!VD8@`C$I>Ipes@q*x>0`l z)z8=i*VF~+bxTYjaCr)lzaDau^|9V&q!IlGwQu0TKbn4oBljDL$D`d(xUR1D_M2H5 z_D)E{)YMOgPe9j&Ta=X`w!K8L8Fz1tOon!uWan9)huounS4Mh4dF)BRXPW~rZ){=b z8GKrX8h<5U_7;gkNu2?Vha=mHR?g_-tDJ7e(~;kBqw^DncZb0-heR1$Eu84i7(X`&aR*AQIwovW z>fz)N@L0uBeI%!;>fF*(y?aB?LspSl*h;#V3|hH@lSBCC>z%=##r4vBD?~% zIcaMD#Ep&MMR|QloYSVm4m`6&D~o=K)KUR!2dn`e7}AFYi4ni=M| zwlXp`cKoTc{O?pVGTu@effshzIQL;~Uran3$O8b$6lS*o0sT!BoyZd(zz&P7axA%@Nz)_qI zkD$LWxQoOtM=CJA^aux0eMxT|$TTV{XcUf%R6YWWWpb~~Wr+7tk~!$o(-O!M!{#H? 
z)jCw2taNz0WO)=*Gud3!7Hi9?DqB;9JQ_pLDASj_PC!c^M|om%q>Zz+S3oK5Y^V&l+!?6vHO@6@c? z%)vqVE`pRD|ItbFC1kt4ApdNC)&9im8NW=RUr>

@up^y4&I8N>~wvL%f(S2W%NN zf&x46sN${5Gh+I9cd>g-O|x3@x#@hdvU54zx*WtnC#5%quWk43w{;_G!4&;N;wy-O z?urjbDnKfp2u4gknf&*wBJS`YfdzBa#pf^Lo9ei}Z)MCk6MP}h0OYrd8`jVipqsRTq}lh>h#|o4yiA zbPQLKXatZ+L=I$?XEGfd7x*_lf|=3xKLi)yj}jQ9pD+OPrv;Mqe+~uywe$sD4D}uV z4@_J6*&E>)?K_L=^f9)ZpbIb0tyI>qF^OuZ;8LrA_T9JRowWUXNjyBVFxj7 zcFv)I!ZI!9%3&ro1=#}qZ!W@`!*%Do@xlC)>lS-KJPYY3@3mXj^ZUgyXXo8DiZ)0M z@ORv8NQ5xIiv%yy7WuvM3l7ZnaX8M-u4s`LZ2-*e2V%BIin4U@4b=3ps|#~L^v#DXv3GDk8H#;lK%qAV<%I5Z8dd3-sIMfqq2WY52;$Y7| zC@8Z_G%EJ3tOhCq_Ad3l4=IN9=Ee$7k#R%^@JPd7SnqL~*a3EWdfPj^Ft)B}bgnkr zBT1I)!g2ha@JU#wQW1op@1SkuaGVJcEJVhstebVvoHV+n`EI?;^p~M~tfk#K1CBi- zF<+3FQvDXkoVE)E6Bj9T)Vlo9rjgCj>S}EH&DnJgn49L@7ZaI=v&F?OY*>NLOQ-u43cR-0P{LGZCyKsW{^hNC8iDiqJ{~) zNqU!S?7Gb=jXSc_T>xTosLbq!#)VKVs^hKlReb|!_v(O0B(=A8tA0Fic+K)>Lc!(J zge-eb*cuWjJCE_q)D}kLQ`X73XAD=didg`EDAk|uw*rjJ1Yj*bj<;`v&pOnps=(g<^CaeJRd*q!NQ`O zTAcA*KCphxtD>M<0l)OpWo@|W=Vs)XFpM7C;96VQR+W3~AXoqC9@yN@7J9kuboR-H zHL8|U?V*D#Jg&`hR95a1#ByH}mfw|kcIP#b2%C}r_nxhIoWdo%k*DB;N)%#~P458H zR&1-?mh?}HxGi(-dh@nkK_H45IB{y)%qwup^p85vZeUpqh|G;9wr%q$_*4*|PS(bw z3$<2M;y;*(WAtHSM--PRyA1<)1Xe^(yuRRaZX9nR0oP5%Wg)P(ak|_q$^7Cd)NP#f zFt*;;hP)je2EkvO_Juc*@6Fd}(xbH@+`c?h1(9yjJzcLY^!{hs3;2?q^IfrF`+D{7 zeAjrrb~tUbxms|met4=I%jCVN6O3DEeY8_%NiNb1EvTu>AI1J!n@36jd$2##c}B>0 z4L;|^v$`6=K#^tk;MTA+ji{smQT)gaODj-((|WI%X2JbpJ46#0RZ&FMJeh+Z<&>04 z)cI;7Dm)CZ1Q9H0Ge@zDXKAsB9dZbg4?1joh3}_)K2k;c^(s6)kl-$}hLll_T0$(y z-4SgpruNv#}%R(l@3!%tj5l!d~Np>{BXo}gF5QWAP7*n?JW-N~>|I~-Sokci&_Ho87f;meu+(2@Yz45X{^W92m`3_^%9FadE5^cGO72ffn`$&G} zGOIPIF?FsLh^0eater8)<@~LjNIyP(W7F~ackhd7ase+Gfo@-RBG6$Q+CeDbE-eiO! 
z66k;0^Ze3P9kEj(yiZ!_vx)K5>+Jrl2af_iKMbiG*Z6y})9{?`w@LyvBpEEC99HEm z94J&4%248p>c%Nb+Y?Mm9%w8P;5(?F8nINf&_*-><^LeQ6{hj_UPeUhLmtxd+Vmgt zX+WF*G|x;d1!gF0D5?$*b6|tDV#m<_?(f{b+Jd?J92?)y8t>gZ+-KQ+Bj*PJW__xR zdf03Su)GBsi{L~F7m?zTiiu`Wk!YO=QO{H#)PP2?loJ6bfRs0oKxO3+aYm9`#}5V$ z`x646$5C08JvW-c>mV&jy+a+V^zH9IQ#Inj?BmB?I0~jhx7qLD!cSQ9{<) zCB(xvh>|7z&?P1A6fTeZ=vH4`HaRJenyQMrBMl$uNuOX#!uWTr0YsU$pvq9H4wY>t zl^X-E=|ppy073iT6Xv?zU&~*SOz)S{s$uTKR(W@_aAsUm!9UD9D`~`uK!3`Buc{%2B4{J%ioRlMx&#kB{e!Avb zJrlj#<)~p=4r6CfO9_3Cn1xhg=x7nk+LY}yn%fvBEBY;q4p`CSxj7WfX^CU5+@tJWJi(W&KcO*jj5x;xDLZ*AxFvIAYA@P8yW`o)9#pos(U zSgS*I-N9vd=^11lccI*yNQxzMgJ!_I?64MNHZL9-U_DIfm>8g{k^fj)WeFHM8I_z& zZ3l@3<|n0jQSo~R0*Qcqvf~?+vNohOl*bzy=)XeN;2a3p1~0V$$gAWoVuI=*iPkyO z;E~luur&+0{@(mshrT+g9pcf!^T48w$vch$Nigsv6ylw&q=E-ICa#nDgi$8vmBC($ z=yLuLM0U-^2^S`{_ZwTz$|kB|ZzUr`AM@J;{X1nZJEj`$4skl+fss?6#-GZt`JdU# zvVUW}%8!tF0rBe>`+r}#|FsnVkBs^MUX+ze>dHSpWnWVCqdl~T@Zci3NHq%q1q0&Z zjiRz*rIA75MSd&j>=Hq=uts|mK)cc}S884FYT9`Ym2Gbq-?zNU&7M-!u<)j1^s21K z7oJaB$L#M;cjw#E-oI~{yJTr2o((;6binRCTJm*%J0nrPf%?1jgigQI5bI~2dsFN451~NyCYYvfVfu5!YwE`!Uv%`& zB-2spw{|p}vcNP<;@k3}sV|3_r|H|Z4JC9~&KtI*)@JhM?U=mg#m3PjRVoE+M zVYM5uWSO==K5bE81EEz2?F$jdRB^ec45FWK&Dz+e}E=Op=h#{z^;qey2Dx+2Q2qzwA-MpAB% z6U&685w0+}tjouEmcVXOF$U)7w=8u*B7piVzASTr-X|xfrQR1uvc@IZr$CD4MUVF| zMre!R*v|cBT}rB>9#r~c4@(}lBCp$9)X`O$7f_9s)8|{>$Da!Go_qr=;4rtnr7TgXUpffMV9akHEvEw*Z&g!2Env6(!b;)$Zkq!j9UGy>Zopi zUQ<$5Ex<;BxM?&1+E#8>B$er2c?TqH!q^=LX)1lV=@=!xtMbm`$gt70@|} z8AM$V_n1o@=*E15EncO@{DFc)hEBSA@Nbk=GkNsF#}_mBtmF20k$-)eOP+G`q*EAP^>>5d@ea zg6^gb37{ol+=uYC3->5=jbqd}&J|19Oh}yYviQ}E@&>94`r85c>mo=XKA{q~2C*8q z1(8IqD#!fuWdW8DT^RfX)ssdyOzHq^sC=mmY``qcE8^g-o852h1`FBL)_0fHqqzW%Y(brO+X5H!1sl*7|2>*^XZQ^Um1qp- zj{+=uY~SxwTj1)2rmt7luK=kSptJDqqF#W3sech+R{=RBs5U1mcd@_EU~~8?dsmUjsf7tKBg%yZYVwFEDFu zWWQwnb~$%v)IaYXT;h~afPZz{4^@br zn($GS68Obz0BZLqKb0MyvEEp-F z%XZOu9nt29ll>hIY!o7Ulpi znv6Q&d-;x1Q#smNV37IAjmqJ`f>4;j)zs}@5Ggb8NHQ&r9}YcFk1=s0qSmfDIT 
zL}IzQfY+Hb7z3YWw>3^;vPtIw+@lL;+6f0j=R`K1?Rs$3&Ft1)@NM5zV1L&`Vbl&7 zswRx&Edg?U7fqYMBpWQ6jO&vI*KI5odc0(9&B?LUS$lNhs$&T-QLab-p|8suK`a9N zU;>Q)dneC-M2!FT|4RScQqNRUcScY|-Hb2FWK7ixX)w*zIKVgM!)R>CsoYSb9@Lsy zLJk9)H;@1=N~KM;fxCA80PT1w>bSwB_El6JKa7XzdPVs_qfTy_HegHLC>RgUxX-lj zs_$O^k~(_!_WADl_zRBtc0-mj? zs$_XlVRk8UA;TzI%p`NZo^_F0EiGU(u~@&bF!!jgly!a1es#9LBez7Usio}j;#J*M zYwchj{qF*wFL`?T^AP-=5n(>kT+$T_0iGHp4PM3Z+@Rs&k(ghDz;|7e>IBW%Q&>Q* z*|!8m`k0#8(2SfZzjS1JdAS)iL*a3Q>Tt-uHB0^>6;1Ac&)lXvA#A+^~TF&^<-Px{Arzw?$8;b z6(xcC)ary#!{#M(-LV!}WvwJ94Y}p+dl+)^9$xeZPD9+g#b-y4E)=6{dZvMSy(4bs zQqd@m1o^6YxMp0{hxGGmxj9Cv;|d+QcXE|*vQbI!0Pil2SOuAXlwDZl!rN-01kujv z`f06S5M~gsjn6G_ql(Z9v;Hz>hvm)t+G*Reo}Oz2DoZC~IJYFxV3=*1bcDI#V-ehb z`yS4?O;M_uUKUWRm9-0*%jA%+L}L(ouJ)NW*6>k4H0cLNq(fNgHv4Jnoecj0zTR!} zd#20Z0rVivt#5;(=aRdjZc}W37m&` zO8hf+O$5W$AK*8A8`$z*=vRHy=*QmoFlAg=(s#RhNTHVYC1}1K@hC|GVLZ=F6-*0x z{+sO$vPen^=y*Dt6A!PzJ!}(6LIqT()R5jys9m(YH-ka(Nn?~~Rtl-H*pP{zU-MQ? zlXus*&2qLymA^@KO>Y@ZjhbR)e1(|kVQ~2STn}zH$Hv*3wWt5KBjg$eN#@{G$fcMS8-`5K^IA7m_aM6 z`$)$n`bVh3x<&!)d?X1WLQ9uG9!?;qPGiS*BaH;RE}RifZm9eNEHWtim)l0DD^SyZww8iac z7r6e^#bzT+IQYWSF&Kq!LAalh*r_;Wzi*>jtu~LuXq%d^sr49_?y34lr!u2w+EXxL ztvGKYoa^y*IC%Ypz%YnJV8{reNW^fpBHc9m`O*l>0iqm+au0Ze=X^~VrnQF?&PU+5 zvDnPzI3)KOpigkw6k+Ys(1~ggta{l}hmoJQoMZf-VJ+IOf#vtk(!25;+d@FGwm{aR zAx2bT?D_&PU}I*Rt}$?_UtrnE;npz+3Wm#cQDminaPZX-ZsD&rZgNMlOP>~lPs)5- z1VY9g@uu8tU)@>Vy33Lo9Nkp)j+fdu6g^!Frwn87+^Rz~KEqIZNvGPU)wR*jLB$B}I$TO*f~!7t4654oLO6t8V2r?1+T_Q&0K0 z4682u*_{u6j(?P@{;`Y5=-T~Y%Kr<77Z}0&gZ+aQ{5EN9gm5}+3o-ZC$|VI0^CJnl zlu@4piaXoYaQOv8RMg_I3w0k1bN&6lEJ=n~1W@$^LZ*+5?6;J{!0RU%BNqm{<~-t- zYBiVcsKMtWrxI-wsbMy>B;oLhCnBi?O$~EZ4$9!UcL&30S4}6G<>y$P0t(I%#Lna} zX_$_w@IIB}3veH9GP|^0P;_>@eR7vav@g)kd8j3{^_~v_K#JRObGNy!PKV z%zyngxUd z^s@D@xs>D?9|0^XQSe9+5fMBr9-1rL2ipylxZmKI{+KWoVU3B__h9-y+tCNq0iyqW8C?N<_=wTWv36hc-;u6_5$-8<-iG^wVX{rs#%*o<0 zP`zZD%9FKz8kA)Pi`QrR2c(!`3^|x4*s*D2BB*E3p1pCB6wSJ(K~r=?GY2zKWbkSM zk97>~}>cv zb$Jz&BN$J`J1%`SPSlD!*ydwZh|}u@DspA$4$sz 
zuve=&^SCLUwSd_bGS|G?7q|}mlM8;PN?3s*Qn`LoL_I|_0v+g4G5lm(&>D&~sR6?l znI)Ws=bL^}57Jk}tm&JypgNPrn=57ljDoPx5vC%_rIdlHBI-9tCQd3ccs7 z8t-*ywH72aUrR7)OSDPqV2JeQ%}`Fj)8^<7+S({A|0d~}AU_#mFK*xIuPXctHbR_6 z0>4#tdv;L;zy3>@ngEyuC~{UEld$Xby%R!P6GeG0aQ`p@>*JR7p_5+YHPKN^V4fk3 zP=|o0bY4goP@xf7HieU5*Pudrp}QZK@B~{n6cMl7DMdWz@t^;~@D^eU<>!6(45Z(_ zk$+hp^uOOo|9MRR!MG0pHBKn;ANR0%BC@7!gZmJPZJXt>$m&mX8a!}cI&=T z^1$X1PVvlD`DVXD#eo%T9Hq`v^hcCB+%v=fj3To3%ZWn%=JZC_ zoex%j4J+ zbQX)n1VtYQf2U6; zl+lO7)ctA65@v(JWy3f!Jhj+syx9tcQ)P2qi3?*W-Zw#Ork|#Fs{k`fVV_!Mn!xL3 zIk}JIQwGd7Ve?#cLD_l3;B&IP`k1Ad;eT4RS=pW5A1i9B3J!lo3 z!WN4Denb)1o>9tu9*MQeIgR3$ z0rD%TiSRC-!526-Q_<1bGYn58#9j%95VT-muFHVK2w+EN#G8i;i`sA@UJgGpB~}7x zXT$xV`dKsMX!X;9Ku-Kvd`_&(SCYV;p<-2TVNbPS!mBJ-Wd&_+BDCO7!-ztt23Z4X=cs@kswD@}xU^1g^h~pu=^6pW ze8CszeDle6mmn7p6^EWdfD|dyNB$Hf%@?7eA4}|ajD2dyBKnD5ou30#)271<>qDF}GnvD)t$ z2fj&M*=&%VGF>YIAwtb!y?Ie|YWR?x(XuT5a+5#3i=W?qc_A~KjWxnJccu=Xz$PiiuHzL7#&Jt#VEx6v~-8J%V@+^q|MYi z{c+eNd4k(vCCT3b1G%D0UknFNZ?%lsqRm{_Bk#15n|;|H)9O&HOroVE-FG(hc4&ZE z(2P$V`Y^c7#KE)tx3Id<0tT%cp7~`AFs#cqf_JH!mS_Fm3^W1T!JXma96S=IrQy{} zb0%%7OB-G)J8g)5WpUWTd10Kg^gMRt${vh%)nB};`vmNAbL>TCRA6}wIE<1qWykbg zPcCUTMV-!d>owCDM3^BD{hCpJcQE*pH$gV#ErC;Wx|Pm9SnipSi4GEzX%cltZ8sf0 z4GJEGTyuxoh}YL_^g{rSCj(Mn9xB&ZpEqiyz-a5H?)=3b8E8s zNV4xhy4dT&cqJb_1$w&<_Ly*)afAyxX!#R8gU)gG)(#SXrbXZnoP4uq5;X(XFv+a6 zX>3lBn@9^3=&!a@Iy7C*kVuccxvO@qV6GM z%IEWSgV;mL3SA>lp*KOzvB5IVgDpwgX_;?gI5YK6==zNjtGgy=}3pI7Ml z*K=k&-d*&zJ{n?u+*PW8qBhLLy>UlMZiEIK|oHw$2rs9WFwD^(_d8L4@aT5=s?a8c%PT*VUVg&tO4QDy2SY zjm2bF%vg0dwTFqL)$eqaDox6HxHo5b zNFgp5r*h$E+lpT*h%KuH+&3V2#-tv2SyzkL$JGiwZeF>fbV(hQ2BwSr_!rt3?1T{# z3+p)Tl>z*Z!>MQQ>u0C#>Grq9WuFghUm2<38IZ<^qz{5X#CQaF zf*+9#(YJ9s#v$mL$-q)RasrGY`j8?J&3!QZLlA<|;QEREfPSG;1T6Zobq2^_0kt5q z09VRDG;Z8JCf6j{ENFc;@3BBW=)L0zw=Nv`9rTWlU%SG*pCtHSWjNhK_eeShOUWc1 zguBW=S8?nd=TBUyH^szUGwHcZ_085TFwz#|m8>-DLDz_i63t}Q{&1Hz4#&BBM00Rg zVBLmTo3$&AFIBXyzJFV$-LXKdTj9!w1s4u$sTtwJ%L#eIW7Q-qMV*+xeM-%y0(?Xu zYf$T);aSqS%JCFk#=-}_oMlbLI6SL(vsS@VW3P{axttW?Aj^|nTNjt{WwB<@*PDZT 
z83dbE=PjR;JkTlb_0}gc$vw%DL8IuHL48?t7bk-p_2$2S%@_`iYL2H6r(tbXtG6$H zi1#UpOr)gY$kAjz^D_2qA(d?Drx*fE7ciOz|S65GQ?@VtM-pB2z zI4+D&hV8ICIAo>$0u9M+c}S*w#r~(Y`X!*Ot*s<>_$|Jy`Jtq%-UyXuOq-?62R=8(;>I?z9KdCKML;#{YLY$;T>XZm?=UMn_|2rJTDP1Hb8tg|jxd^v+7b=!NmtTqBeh&ZS#8&>3NHz5w>{Y4R_ zO^gPq`R-cbRMDwPNbP_#R>)zaj_`d(XF|e#kUT~iLdsnipk{POw`}Y61ZAD0nZ%DK z`9$<-)~~Drk;!X=k_bh1nq3~u>-~rbzMYZ?_?z4aK6~P}R|Rp=V)u!VrbLFxIW+2b z>QCbRY0tN4TkELh&c0Z?EZk3qPr_Z~pM`RmqbUOkJ-FMoK2VOdHC4y-G}8eV+DZWk zX6jN-&=s0$n)ykYm32Cz^-9AHW)kRCfBXP_Rx{TG3mN7#g=+BS3*~Hwshl1}_t0Tr z@>%){i8cncHw7ld83d}Tbd$lY)kp&6w=djR4OnT|iOe!>@!}5DO!8*$5^bG9=g)2C zhntFe*FYJuTv6y}J@zbU^Oo(_A470wLp;z+iI}Hu+#FvD9GC*|JoXx#vUsEWFMWzs zrZu`29dr4^OWAsvC}BUpF4b3865d`bCI=`twM+)7OHA!s+~FKJo5g*Z3)bGBekB6l z{^OH$w2KEi*_gGoh!}k-;;t>d zONzdN&YtPqo8~CDbOb*JqmAK3!_<^zKpEMCm1_Aw;5Ap z5mLu5wB~x0{)K=s#@QHe4QB^QHDEk8EK5WS~XtNf1f;f+>NG|?7@i{z{;oEixJ8NF5> zqrFoEMY^>gJf2r0h7)7!AZa0;Q)Gm-_udiHd6-r+nLkdP8Idjb7YZHg0a|P*pi7*?SHZmWTU_)ek9rzu5jNMxZ1-PQ*8;dpg0KMZ+ zvg<$xcKwT1PCU?+SNM$wAHJ2tf2-A$Hg|CNMu7i3u;2Rm|Lb+l{H9sv<-UiSxL|KC zp<+^oL`w;+0@uOD5|ltr1!It<>CyM9qAyLPU7^`<<=sZwJj}lcAO#Jed;j1|xZP-) z_$diC9(R?o{+&~-z0B_J_6ANFjEe%X=ZqU66Q?A1(h!AWTU?EZ3$shuPcfd!pqaK8 z!fD0;=)T-Z(rPPKxoI++8v5w=@#2 zMjXbSXl5Z|#_JGO8fUn|tFn|N+D7@TQwqfCT14gR8eKfo(XD8)29;&w))lNX3C4^C z4_yvO`*Vokel4~CYWw|m?mdP`6}1AN$VtBqzG;7rd!*;vK*TA97s|PqHCZ{xFnm)~ z9s2x4@urFRS56_BvH!qM3*$k#n1pR|IB6|zmWY+93=<3xqmsN1=9s}qAI$)aN{!JH zA_;b-#~mdM`1_d@qW?<#VVuI_28>DS-W;HRhS3j+m07d#0Xp|#ZnIhhr8t)5s_EE` zT3JNF4UnQUH9EOWEO^G^5&wflY#veqIXg;kE-My3<3l<9gfNQkP1q**CvbxQNd9i4 z?}rC`rg%nf{cI18sklEK1$F*5M?}!fAVS$8bbE-G#XWNyeA8y{>>3X2v0d-+Oj2Nm zDM~hDkKQMEUONW4)V08yH^lSkurW|St2O-qg*X|7z@2eK@Q#PRzc^?S&VF!iHkZ9r zQ|_p96s8ueJgP3de8T?u*X4X7*PB1c+u43Z4}DJ|zhVoT0A8Fiv)KyX%2cjV8ZN3c ztL25YZ~Q;dWu@}E_5AmW*7O3qy%ypGR;@9T0t)F($+h1UowgLH!l=2w zK!qu7u!lkB2db9ff@F80U3Y&HLxo6uuR{t-k=~4>KaMap`91+%-=X4x zPIjb`(iwV6mt`gQh|&>5t)M7K(0ED|DJt@k5JMGy`CcbL;4X9eMpYv9y3t4yjy&B0 zXf?}(|7;DEY^&|$+8O=?lHh`ed24Gb-U*!6TTaZ0@pw}Q7YzJ;?~UHyTPQ)J#Zvh? 
z@zWJEmhvLkp>o(em;{^vHcBnExu;CTR9eB;(I!)lr!hG6E{)ZFyun7Nb=JW@0qs@d zEkQlh4xOnd+KSSjO@HD@I=o=|<+>iix{rdun$Lsk$f(=9m_IWJCWN&~H&6?b*q;D~ z_z1*N#2($~+O|WY^B2XDwT~$_Z>S36GLjfaX(W-3%cth0B?O@ffccd9nP^2UYXi03 z4uGbbTuq5S1&7(wk?e{h zVAQ9y(!U+Xu-73g-D=uy!XCaY0}{*g46Aw(uj3Y^`bK2@ecVX7t+Z{Sba#VZYI$;U za)t(vXQ(p)x&2Z1>e|kteyh;gzRHrGHZFI%Py~Mt0qoEdxHKWd^)3)GmjLTWKW3do zAjEvy9GP>k;}a@@mp%Hf?5FySdRRTR601M)xPFMIdDtwb#x(F{<^lxbF(}O2M7WWp zl2Z1I|46W47x`fC9WM8*U=}&;9?~EtEz$n{MNV}jhKm(Yw$~vO&R{W4Hb*>XipJ>;XH2Jpx|a+wMXI;lt6wo3Z)Ljs`DHXyJ)$LIq``b zD^gxc6cys%uUQ7+5cWzYV*7mU@Rfg|8&gPjCfdIbLD}~qVEcDktbY!{zmfonO8n{L7g&g|Bl-aN0_nVe5{2&8e+`xB zMjki8%CJ(Aq9@AD?tZ1GGLZ5Aq1*=~L5L@!tSX&ponNexPDz*N=h8YKH9L-P81rF9{!7(z-F7_b$_>=@tomyjdThM!y<6Bae zY{vdG=_1{p8)N}8ioS;C@(dr@R_)}T5C%c>V|b~c;5LhRi;iAu8)R}ulL@=&s@Zk6 z>}ySWoQ>vDwvcTPx>kHaVbZ+SX}@rki*GH~J4+^t9PC z=u|fHt=14)lle{6cYvOX)mZ&GBJ2{g$@KN8b~e?65RAYOh7N;tzih~EAExjN@1q+I z%{fZHMf2P&Y=78aW10S)9?~lu7_`s|<`1A++aoC^NWXxm+jurhppAHvH?dRhvT4g} zhq=&!vD%Yows`SWp3OsVWit8a_qg>5DDv6w@3>Lm9=CAtDXgJv-m&d;~GjW^oz$Nk(#o z1@_a2@uE@10q#}vxN(esT?KbwBA8PA?NrPEpYyT)cg5-dgKbER+m`sAk2Ta?uU_9) zg!RR|*tAsgGaqGH!bakI{!w92PLLRFM>=soXI*OIYUm4;7fv+@-Rlppk~yYy-;f~Y zcJ%Gk`t85CQyCv0$GhmhL<<5aHHdw~BEFM9lm%|p%#Hbwp&mQodTollzGque(8vY{ zR52gtrQ4dcCO!$xA&Ru#v!AX@CL$(HRaHtn!s|1duc@egD!o=UGEWK_r5cS7tNhs` zXU)qVDM>CVNreLwc-GFA*S^Fo;8zo42_DKC(|j8o_}K(;FZ+tK^h}zcEzqyTWWgS@ zh9q-VNo7ZrCv?L8M>F4XBPFc`LGn%7C|ap&BD@1pRflYD?8kcG=Bv?7FhDcF#Y3#* zBRajkVLtbCw0g{{;BLZUXNXE4Z14wHVE*azZ*o4JS@ma$C)d8`c`ZbJk2~_fGvavN z!>{FFkFc8!sb3(TVQQgHCSQ14xZrpu4#;GuWJm0@kuVUqKsRotYGY2ARIOEe##N}v zbX>=47@whw*!`#5H)A98{>QVNI>*K~_FtOT@KY!+UcqjB1B4c-kBRlkrvGYy$QybV zF8{s^o4$h=|CZeN&(Hsd7yXB2N>uui`3|dpKDi%`*(GRz2+1RcH;9hQ4`lzsvXF{^ zASDO;(yU6hckQ&eg3FKILw=zn1_~wR^}Q~zbJj$#j2DQXx|*2syq}!7`gpznAoJzm zJ{9JZ${c8jVh$6aDWuQe$D)R<=VV3+B8O&3?z7tEs@|;vc)&p7En(D+ufG#Db6+i2 zG_pH>tN{ti&V+3C6i?=zx8Hu>Rb89an+j^Ca#Z|_`WR}?UZ%#yU8jLIFGa^8Qht-2 zPIzqsHkga93Dl`Ym)3uh-Nbi}_SsrnFPardtK(KG0R0Alo=5;j>-W%a 
zv;YBaW_n*32D(HTYQ0$f1D}mzt}0b00pREwqaDs63=9t4-W0$vOrgWA$;f-Z?&gN` z#Y@8Jh((?U{Aty(@Y^H#kv>kR!#)il7cQQrqnK(M8+N!FX;TKysz_yWVeZyih+bxz zPFhwq*I9wiJQZaX@R@Fd zhm)M^g4J!ocM&Sr#Je(})eKrZfmJTtsBOj#%QhS~p?;xq0xat>K!`S6yqJ+fOHe7RiPEXH z=n0VtGLibuH)7tE89ep3(GVosQpm zp|j;a@eEz7Rpe-uw=-^hN9oU9&rT-Yo*rL_J%lQb4~8PawCJ#I-}SFFF?tvaaBG!b zTBym%9f;9t*5>+-4c`T6gEj75YQhMztT$#gMLkh}wXQgjGilvp^{t|I(d@IA0>GVn zVpcietfni2yDnL&wq|Q@girp$h%7qMbnk`ys)1-$xqmNOeHiRAOobh0h4dia@LIh{ zy#XGd*48bZ$YIF~Nt-&b2;LJ)iLy;M0aw48LMd|`3NK3}exvO%Kva$Hkbmypq|qc`#aotE2e&8Cg`toXsxK7lp#v2NQs4T)#v(*T` z4V-l$BJ&{B?HBmT8)3|K-ss)Yn$YH3|v82T4{qFo{drP++b-XdQ8sW`iIaxs@bhmv(W2Fxcau^uSMsEK>Rj z73{pi-93B=GkRE^q(gv}Me`lRD$4u##NtahUMW~WV<_G(mZgpxEkT>ktO&T}AiKv) zYPQQC9FaFTI5u-gy3R1+TJ&fCfwY)wTXYdcPDt(be=m1EX>Vna?{aVX*1{P79o+jr zI=)23ZJRl{?>rL)3bcdo`T_?kA{z$wVkc$8Dd{}$~`4ejC5hO@{QnXc#T z0QlFBFY^6Xn)J?tY@wU`ojVNF&?|( zbnfCK%xS|Q_1F^Kz7K?C~u(8lI(naxFtb;QU!&?z02`H&FF z!mkS)m6y@=PwvK@>EsMeD+WefGIOsvHuV@0?F+bwogS6kg5}ae=zx=nP;tE?I({Q9 zVRtg!inDjc7#8DG$VPEZA`5Im)BVEC9nv_2iK;;wK}ioH&CPgGbexUQ@(Sj9_!r)kvXCJ%encU1>SYu&bJCU4kM% zu&#jOS{6FHo~6ie5+zx|y)N0k&eb>APMu|luTQ!uedH$Hsv?C|)pDP8od%Zf@L%DB z?d11_^zWLo_?E2r{+*gqwzl}c2v(iS;|kx#LLQem@jm+B5D2$HA>`r^fywY7wJ~#Z zlu(rd>NV}eigu2Sg3_d8bT4$Y1!1Cz(0o0K*t*bc)*B~uYRT4w>&?@r zUBxz}*FN1|;CfKaECVr%Gk{uFjmY}Z+SHu@@koWD{1&W1mY!%e<_Q}MIwi={u_m2rB<#9V4J9>?*vl5oRZfXJTmY|e!7f;(GLTw$3dyXdC-ur& zs_ZQKr0CpVi2L-7ErFzqvnpB^fdXWKiYzKQQQ2%ZnB1O5i8%H>MR9pfj2#q3(f2sp zVrO!56^9YP@>1p*qBZ4b(z8B}iwWo#QPzJfZ2n5J5;l5WWJQI2))jQh@YnAnpn|kj!GlSHn`h1%4Pf10 z#$`L|cVl)t_`K}u(j}W>gTh}T{@E_S>wj}-5oWCtG&&=!2_|H?_mnV%zl1v9mRA+J zCMJ^31?>7-WTFszA&y6w3_lSx!8<+n4o@pN{Lvn?<(T0BQ29+UM7(g`QwA~LQZnP4 zU<-r)B?xOkj>kLd9>>fmqNQU{&&ZyHsS0l7`|r20kw*Fg+V}Ep%kOXy>A!Ju{=wRr z>gIY{gR!3yX{l`P-^*cF>v;4mcY)877@BGh6?uPPO0p)^#==jixyOm%O^2i+HnD$i ze?W{vh|)s_^3w|j@ozPP_FI*1=|dX1LRy)u(_anX@r5O@{4qT2{jrrkJ8^;;`Yz`p z>!R$W?6kPNC|ix|@r2;3ey4=Td0YGEQ?Ht>j(7H!;}2=V^6W0W$^`7 zI4ep!?~O!v5~B<=*F@yi7{w_Ts5@e*KyKL4voF&)g4EC{VF$Szr8e2F46~Y@w1hMV 
zB%|OUt0FB_LN@$5!IPUVer2bGG~Q`Jtd_L+EQLyuIkjw*8Ta0}ElPt!T7GJ#Kxo*& zonOLfp)?We+vTM-Y)^7ym3oj22{2xeP&!pdpt(j%`AtU70i5Ar?K>M$lchY5>M(Uj~|*+YrLz+Z9N3Kui`=?Fe|1= zh!)mB7k+gDHRK;^CKd1GKRWJjSI>*YMszDj=op$RO-x?XI{$YHU5cHrjt6NIvle|B z#L$juDFK31N_xp**g>|YiJyMW_!Wp>UXUE`c*Np>XD~WQ6<0EWeTxkBn;XiVq$xQnv48#Lm*K9f1Q8ZhUc3t@ zaByP4iMp@`I;U1fwS$bkGAwxxx!D;{Fr(r!oG;(WaktP|&V_b?=8BQmip6Luj5$0| zhc~53_*^ZlbQ-2(Y8FF)29@X0^xnMcQ5Se~#b*hLhQt+n2DLTSmsT`OMuM0oSz=k* zm^XohSF%XMksLI`ycclL8ia^bIX9+^&a4uqXvT>sPv0wq!P{{4E3DjB=sm@V$Y7%! zC+sm1RYq9hN$~{yN{e7VltX_cA)c|!n;*q?dYXczgf!fg(noPLrnnxesgD==To z8kL8^Xe6-n;aMKLfz8PlRF#MSv?4>??F%vaeY|2;u^2((FqEY{<}^6LdJYlC1ZqB3 z2{oA5)w({3mp4GtYs<#=m=-G}^`WExESws{F`1^KHG35pCaemZYTNP4S&coDVz1)h z8*Z79OCNUVzXp0;MeWe`E?DxliQF|%2gv+p-JXPDdv`g^VtVM@?JFJ?P6J_C73sK& z0ASccOU!}Lgai6b!cl)%Gh6~G=;U>AUOIwkc2>p3YGZLOhFEDwM3HA02;!~cRX5T<+xEU;Np547z(7REiT>>AxDj?=02(=YF7$%UbodGTeWgW)mhUq%ohVGsscH}xZ zFvAmi7P59!*J~lG8ifrnwf6T!fOnxnfy+8QVkBu4a81qdeDepEiW>$<4BTR0#DoQW#Xh48w zkOr5#77d`5aa;OS*H+0?*2SoI*}r^XC-_7qOqyh=csx#Lg>hkQ;q_?!}lL-SJD0?H4&BRTO`(T7`&1=fH z0g9@7?8b;wGwu11oSm{o@(2a)+v}dEcFaqdFJr`Tp%QNrqmIDFSa17nefwd?;NaEU z(#gt`FJTu}HP<`XFin|1%8^^}AmpUB1EQQ$c0SzBm)=_Eg<(8417DwupI)rljtaNr zZ!AN8cyEV!L^3VFlg#OVE8?Kq_gdBKK8{@L9YI6kM5O`k4C2vLnrurQ>zRO>*pd){ zz3B0|ccsUkB^<*IiL?N3Kcj2iHMHJbD41!e)8V1H5xSTc=e~^O90+yHjLh1Wa+A!h zsoiZ6;mE2e)6``%fiuL#d5-M={fwoxF9fU!#-A*n=IWKM&w6fl-e<0p zdsn$Tzxt~Hkl3`0vvVNwF?#PRg}gj1OfgXZX(wfV=*t!t0bR$4n!F}W{m&0LlNF>A&2Jm-taK&Yln0GU5z zg!R9P+|Jc4c&$~?;e0^r=y@EmV%*K6r^IyM+Jo+v?U}Zaph@_=ol40*wb0{(PeHbw z>xTsnVu8b9`43^L!`Rw3ZM>{%%-%P=J3nCihI4UopHu_=f*oEV;eU>t>SB?$kzDv;~WH^`S`elYG z*-6@0jA_omI-bj}^^@vts~0>)LPgL8s+ErVUw*UB zn`>FfTXiWa>Yw|TgrdG!mqU0}+vBytAJ2b>*|<^jXExZ(40s1!Ut^ay;5%C{%nu$2 zbZvhO{fsa>86G*RgW~X&k394u-+}H!zIo7Z&};6f5()C}?n}|IG45FpuWdi9^=+;x zLEm@I&%xhMM?DW5^0LP-2JU1xXOkf`?vdP!_h6`9Lce+3LqXD#@fSzqSMJfQsX>po z@MJYcqzFT;M4JJ6KWrV@<4Ke*#febLn_ z>w@cZkC(cLHm<6wz6*Xncuo@WbSZYya>K>a#F$Q|dc{UKB&?WBzW0e+N)Jg&82PLQ 
zj>?XA{Sm?dxM?5gAqP{{fM{M1+0cp!ZwQS$68d&|B}{jputRd}xdt{nA9Q$@l1OjN zwPBRPEZM+OjDqt}$}*WW&=}cSj4W?1h_)37eOx+ZRA=B&{?i+b>yYDNWV}UbYk=)Q zP>aH+hvg2lDxPoOodbaFV4spi`Gh}cc6QhgZ_BsdPLKH=`oZCekYCCWnS}93Y+G@} za!L0GzeR8iHDvG>isJs$IH~dIu+43%6sAgXN?`AKa`S4wTD&sOfq!yL+ooa`CK*a5zP0v<5_Vz--GC62C>eyW3Jv6(Yq3-K%NWL6Xy!!|CEm|)Mz%W>E z8o}p}6cv@1RSD1*Et%D)=A1BlM=CzT0YvvVP&fOXK}KZ{D8k`P?nVeeRZiT)*pEM% z=FU_qeKs+p%;7KvQdJQe#e{H?@5!Jesxq)<)e46sH(6w?SKJ)^FkwkxQ^6~{Jy>!L z?-0%cPaPB9Qg7@EGm^=Q4d9)a>IGPIM!an+Kj=s0)XsqsL{vM{mxvH33e!z(xV#6{ z`Ke{~DFS`$k{wC!l};Mz_P4M{A9wg2cg30(J!DExlI6~DOy0jNOTs*m^C+sdVS>|8 zKQbY|-cZxXWaaYAPh&a(6n8nMC$E#4Ax1dG1^7U`kbyP)eNt<$z# zeKqf8_zvmg@OpT5%}K7@-KjUNJ3r7^Rf>FD;loeDy{U_?lNQ`5X zXHyC%i3!D^8iGWLS`tcKhJXqJ60@d+&adg%I-N)y%VpG8B@euw1mA7gj8|K2kPH>G~2^m))x1XKx$48W}sSyxP{S^wVRF|HV zSk#xKrLp;$DhJ9vDqaY%EILEM2Ie>ubBPA(l^rv|ENJbGe@9V+j@`0`*N(IrXNb+t z205{qs|n4g|1uYbn6-A<23RGq1$3V8EW-~7xP9?syH(BlAPhezomNa`j4br9Fz z)=~FT)xlItaCuX3-KK2-mJdlf2&(s_-7;NWiW66eC_FeWNyhAkMMLJM8Npo?+Ozl3 zBevk_Vd?ByzGrXwCsVhv6s(Tp+}Ppw3y4LwYlS3-2BbkP8R^(QNOla#O~s?%vbkoe zBg7QnQr#UJByEJVsd2iM+}^v!s~Q^P|b?a;Rxpn}(?tsFwEWKETpFp4?3BvCi5gy4)HQYE#UD<7N|{(C=aHd(2(eQrshhDxlelF8qM>` z?!0>eag8!)0GMz9P1*xxHa$t6>2EWBNqBCD`#9Y24Ad)Tu`6xK*_p{(M;4Dbj0LQy z%O9jFpEv&AJWr7I^R~32?HCc~v6<%wf!D(hX9T6A8GT&3cqG%Ov}t_I^NJRnkCk?) 
z40aie{3tP3S-krhh($@gBH7JJs$BGY!0`02RLo%7Lxm;5!mS%1%yUC9v`4f>ieE4H z#l!OqX^|s43*g(cuhNd>V;JW(jq>3?_#5Zu!R`cQIIF)&sZ$kIb0@Y*8LZGeMsTds znrK>jN8=W3HoVhJ8%0!N;w!@&QL5YHfg-HJ%tTy__Huju0)K2$Wl{|%)5`w*z1p=m zqk(I6-12zJ=u`GR8QMYSslPAtZ@0EflK#cS$XoUTvUzAD5C{~PM{Op$pD8|ftE~PX z{g+?P+@KCOnx(#?cP%8e!)k;X?=ysdA>^SgL=k26OVx%=wa~L|(d(mYv!{8dcze6j z_h|LI<1^Y z5rl?QRzUbq<^7^<3Nrw4iZW@%LvB%uj&Gr+rJ~GIy%hkFrYABRAUnS$q%D0>;?e0F z*YC*NTZCx#;`B%J6dANYbnJuKuiyJ@rPo1!W(yoV9-N|E*bi?ZPSQpCp{sJ6NZ*CU zkKUycUA-@@e-CT-x2UC~bWalsYqBGg!6ArFWmEw1t)0(NT zZ%ah9P*p#+ogxb4pG<{n=s1{w6yf)5Pnc7k->i4J$D=#oy!(LeDbH6emaBR=LFm?bmTzLCYIaUSX9i+(Np3Ech~* zZHTPZ`qMW7@!C0m)ySk|8>=iz9uk3a={c)1BmX_(iy>YbGwBzbB70ITRD;4)n5Re3 zv3feudeh@Wv$Z^3LRkfij>W8`O&Xe0GmItv={wtBH*eWd&MAov7wPat zRX+eoZInHV$FwzpEE#?ASl&^}UDi!0=un=cDFEG_WE^xJtRnhKeVAkBcPLe5t$F(B zdMxkAZQBM_DexyTjp?KgPItFnTep?d7nJi;%7+2_B3wz#V@$6<-6N=m@0Eb_ma<*2 ztl1m5s--y1ew_AvXWGOBMlS{P^oSw+WJ3-`l?LTUxly?Y@u^I6d#dM}QeckO61;u5 z*oLSY({aV(R;c;E4J-16B^vd3ZXp@#!TXInjaahq0>{!8;$%ZPqW!!dTfeZcQFyZ1 z>`NnKReAcFyh{VoCo(Ecg&r#L7$AT&J50!dWuZCSI$7O;2*rs6tQS_bbKP5x$#Btj|uuR!tp8n*%I3T z#I*o#zgxZ75dLNmV{k-117H-Xi89zDKYCfrph%G{*9i8aW)#fi>{Od&bOn&EF~ftt z+7Pq>z)@g8x%{iNrNriHjL8#Tcz|$oqk6D3K2kKbzn0Hlx!8MjN0IXyEo3x@M3g3*q)7 zf=$>mM3McVz#U|myVoDXx{f+xFGNmwCa95_dZ&z|Bvtyn?%{DPH&dD&SoE3s&_z0x z;~M43AnS-z%h+87s-#;(dqrM5{(uxI-x``q{p*WxUWkEWpcdlud)Nt*NWi7ZdDIrC z_*E;|%V30~wZFY1*p<%OpJEBchiO-F5;>!XwzZz1kddp zLZ#w8zx>=scB@Ztd0c#j?z|9PpBNz*-EK)g4%Ib=AD#i#u%c_fz|}vELP1yJH;%_G zBIz&kcdB@=G(LXklqV+FuusvJHyD%Dgh&vGat^kil{edhO2WkgZP$cFd57ALEfGEm zA{ooH`(!1zw_6z}?LjLUIq8nv7yXTl)rjW5#`YLa&C~01FLasqF-bD~i?@MUFJQU& zSK^=jJ}|QE;-6WsfAZ7xKB+J(n3l$B6d_yYh*tf=XlZKuwE1eZmsuk&H(f!fH*$*- z=8VRBrHYD*9hKoEhI<&FNX$4HtbcL+-fc8Vrj^C=axFkI+|CN6am>_(t&OL%n-LR| zXL0(#i=SzkCh-Z&b)93uyM`NMyhTR&m(~3<4n_DN8BWx=fa0lu|1Wo@HZ_;#WnRA` zFqhUtg=`xdz#g5)lATxmS6KhH?*TGIn9kY;$7BRg7*A5X&9B*MBPkOrMH%aA`I`Ybng+8#5_=~W4X{{&s zp|@|-*oP4uBv0IA7toH!!d(J7dy@Ny_DjwVaC~P;D|)N5{HHp?{K9H-kn(a+Nk${B 
z{~CaG+Xi)9`xa=0zdbJ0|5IlAA7J1gd)GgZAo4rry6_u?XS4cB)X(^@9Ed(@ps{>e z$;(f|5Hm3q2K9j6W_=e0u=dNMOQhZ68_T_L_>>Y5@dZ<#gj*R+J$2&S-1*dXk7=Ic zjqk;++de;1`r?`E$jeg1i2Mzpa9gs94gq1K#1G6!EvdaUQY3boUDqWoRNM3Rt;Ks? z|EIDufroPId>lu~1>khSb`Z}t=!`zW%eR6~<(n0XDNNTWf@b}bdxZX%T;np@o~ z(jpSKP@+_Hy(&v?mP+^bo{8~rj4|)&GoP_^zP~ePd(Lw_=l4G;fL^t`kw|tiVN}*L z&USsIm7Jk{c%)>R9*x(!@`lVOub%65yrN#sRP#t;S$u}Rid7@pCX|9Mh#q$0D>wVy z`ks^`e)vp6hryw}6~U=;H&Wd3y($#i=Gfb3f0I37m4Co6CP43!Z(x-N`X5osp1tms ze%c3}6kDxdVi;xvDg5Kk=TLkvqlYWfL@LvboWsVW+U`h~6rz383{`x@j1I34O>A9u z(OF!w(7xw%ab7W5$HpM}K%Mf9$YGm+jk=D;r>mTjH9CcgYjXwbLtab1OI>AUy5g{C zP+qH{X$!n|DOCvC7Z1h zLb#ijLmCEVemlBALG`lx+>j-CJM z{h@xv#Js&KqkRhBOy1ko*g1^9E1Qrp(!v^?%anZ^SMoN$#p>Wa#eciXlWFTD1ES($ zH&V4-ltR*P33%k}#G;=mJh;o#As5=>+aU21_EK|k|9@jb19hYPwg}ym-xdxYfL#h6fHhzqHN zYkcGRSE)zjf>t}WM{V$3mj0`ekRsBM<`vXf`EFyewPD2G@^lO3*a69qCC@P{(GljB zE`En-IER~AWiM9AR!j4{Uk=#yOt;C+#-Op<(;EA!y|FJxLO9WFXBeaS><3EcaP&*( zzo~{Dmbt3xpYxQDABzsC^mB-j_Y4fixsHDJ@(yo#wk?L1;9ELcW8OHntM9o~DYh@8 zuPLcd@fq&(3&k|dQ~tzN!->&}k}9$L;?Dn7wRQCA2?Hg$*v-@qnn$E{Tf&&2xYXs+ z_LD(>AN;Ua#b*3^n-u!hwIU%`r>>7{oU5eb3t#wbl-7!T;3rgjJ92pfS?_rEApy7Y zS9*>cy#}|gS#39hFKYTV!#^#)X~5`sPNONB&!GZCky=_LR?Jg)3KK5)P-{=pn-RD7 z|KV4UFm2h_XU&_LWA-qv&zCnd!%S81{Fg%;N=8@A{_{GzSaQPzz=BLBF>Q^P|%BeNnwjwq79i}r|@D4J&`6WOqN zeY4?>G@M^Cmc%VrU_17)(9zUH(3Np8iJwT-!F6ng7(=exsw5C*3 z$^`UBU)w+AjcY3CzPctu1(Qyh&@|3*@)ERG>GdpMP7qb49B)w7x`l3AJg7h}x;0XH zOs6_OLo-O7?~z)8VTm_**C=p9U)bW;@Ae%!8vjrG)&fz`lo;@0df-oa--Bn=Is4xK z#g*H=;%p+BqtiVPugD@`558mx$YcUuh-p4BSDQ-0sDU59vNdxwQMcM|u4!j8JDY#` z79(TupPA21fk;WyiB1KNgrKIg*_v#(GB2B@A%#i?(d?zypHcFT)lO%(98W6yOD8?n5M)czS{wx5WqGz2>X%9Wh`BayD&NpQEt}Go42UWTnwA<_|%>>Wwvn$^e4>v zR$*TaG$)R%LWU<(G(D&=EHM@W|V)P*a|Qn z4hw+b3E`aZ&|L|Ph28KG?7aw1*qPfsFcbDhMwm-!oR~lMl;&Nk!8XJQb&MP8{HDZk z@nIuXL@4_N7sa1zs|pLiwv~uL@+mF^IG9+%O0bI^qVyq&3ni{R?O;vVhz!xpO5sA2 zlPwu61)H)UQWF_mNO7=eft6tY3qjn5ACL*xp{QoJiP>sQd;1H>C zumXmzaWkg(sYz|Yx`GcxA$*%sF8G{}N5KsPpCLiSqRSQ*W8W6=(*p?eRqY(+kLsBF 
zECF0j_>T|>v%g_sCZ}r@ymgC^g`4J*x!=fzKLNa*i0Hg+o}&Y=W@mJx1uo<878fG( z+vDkl-FzEfaG9BzS*t|m?iMT2se)iLW5(_odEUJ)I~zW5%Y{PefPe47&D?g75rz66 D613UA literal 0 HcmV?d00001 diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..4e1cc9db --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-6.1.1-all.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 00000000..2fe81a7d --- /dev/null +++ b/gradlew @@ -0,0 +1,183 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. 
+while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? 
-eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set 
-- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000..9618d8d9 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,100 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/helm/.helmignore b/helm/.helmignore new file mode 100644 index 00000000..fbe01f88 --- /dev/null +++ b/helm/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ \ No newline at end of file diff --git a/helm/Chart.yaml b/helm/Chart.yaml new file mode 100644 index 00000000..e8793c01 --- /dev/null +++ b/helm/Chart.yaml @@ -0,0 +1,23 @@ +# This Chart.yaml file will act as the template for the "helm package" command. The helm package will set the chart +# version and appVersion. +# Command to package: +# helm package --version --app-version +apiVersion: v2 +name: query-service +description: Query Service Helm Chart + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. The "helm package" command will take care of setting this. +# A new chart will be created for each new version of the service. +version: 0.1.0 + diff --git a/helm/templates/deployment.yaml b/helm/templates/deployment.yaml new file mode 100644 index 00000000..20379496 --- /dev/null +++ b/helm/templates/deployment.yaml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + labels: + release: {{ .Release.Name }} + {{- with .Values.deploymentLabels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: {{ .Values.maxUnavailable }} + selector: + matchLabels: + {{- toYaml .Values.deploymentSelectorMatchLabels | nindent 6 }} + template: + metadata: + labels: + release: {{ .Release.Name }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.containerHealthProbePort }}" + checksum/config: {{ include (print $.Template.BasePath "/query-service-config.yaml") . | sha256sum }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: service-config + configMap: + name: {{ .Values.queryServiceConfig.name }} + - name: log4j-config + configMap: + name: {{ .Values.logConfig.name }} + {{- with .Values.nodeLabels }} + nodeSelector: + {{- toYaml . | nindent 8}} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: grpc-port + containerPort: {{ .Values.containerPort }} + protocol: TCP + - name: health-port + containerPort: {{ .Values.containerHealthProbePort }} + protocol: TCP + env: + - name: SERVICE_NAME + value: "{{ .Chart.Name }}" + - name: BOOTSTRAP_CONFIG_URI + value: "file:///app/resources/configs" + - name: LOG4J_CONFIGURATION_FILE + value: "/var/{{ .Chart.Name }}/log/log4j2.properties" + - name: JAVA_TOOL_OPTIONS + value: {{ .Values.javaOpts | quote }} + volumeMounts: + - name: service-config + mountPath: /app/resources/configs/{{ .Chart.Name }}/application.conf + subPath: application.conf + - name: log4j-config + mountPath: /var/{{ .Chart.Name }}/log + livenessProbe: + initialDelaySeconds: {{ int .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ int .Values.livenessProbe.periodSeconds }} + tcpSocket: + port: 
grpc-port + readinessProbe: + initialDelaySeconds: {{ int .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ int .Values.readinessProbe.periodSeconds }} + httpGet: + path: /health + port: {{ .Values.containerHealthProbePort }} + resources: + {{- toYaml .Values.resources | nindent 12 }} diff --git a/helm/templates/logconfig.yaml b/helm/templates/logconfig.yaml new file mode 100644 index 00000000..2177b482 --- /dev/null +++ b/helm/templates/logconfig.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.logConfig.name }} + labels: + release: {{ .Release.Name }} +data: + log4j2.properties: |- + status = error + name = PropertiesConfig + {{- if .Values.logConfig.monitorInterval}} + monitorInterval = {{ .Values.logConfig.monitorInterval }} + {{- end }} + + appender.console.type = Console + appender.console.name = STDOUT + appender.console.layout.type = PatternLayout + appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n + + {{- if .Values.logConfig.appender.rolling.enabled }} + appender.rolling.type = RollingFile + appender.rolling.name = ROLLING_FILE + appender.rolling.fileName = ${env:SERVICE_NAME:-service}.log + appender.rolling.filePattern = ${env:SERVICE_NAME:-service}-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz + appender.rolling.layout.type = PatternLayout + appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n + appender.rolling.policies.type = Policies + appender.rolling.policies.time.type = TimeBasedTriggeringPolicy + appender.rolling.policies.time.interval = 3600 + appender.rolling.policies.time.modulate = true + appender.rolling.policies.size.type = SizeBasedTriggeringPolicy + appender.rolling.policies.size.size = 20MB + appender.rolling.strategy.type = DefaultRolloverStrategy + appender.rolling.strategy.max = 5 + {{- end }} + + rootLogger.level = {{ .Values.logConfig.rootLogger.level }} + rootLogger.appenderRef.stdout.ref = STDOUT + {{- 
if .Values.logConfig.appender.rolling.enabled }} + rootLogger.appenderRef.rolling.ref = ROLLING_FILE + {{- end }} + loggers = PINOT_HANDLER + logger.PINOT_HANDLER.name = org.hypertrace.core.query.service.pinot.PinotBasedRequestHandler + logger.PINOT_HANDLER.level = INFO diff --git a/helm/templates/query-service-config.yaml b/helm/templates/query-service-config.yaml new file mode 100644 index 00000000..3319c60c --- /dev/null +++ b/helm/templates/query-service-config.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.queryServiceConfig.name }} + labels: + release: {{ .Release.Name }} +data: + application.conf: |- + service.config = { + tenantColumnName = "{{ .Values.queryServiceConfig.data.tenantColumnName }}" + clients = [ + { + type = zookeeper + connectionString = "{{ .Values.queryServiceConfig.data.zookeeperConnectionString }}" + } + ] + {{- if .Values.handlers }} + queryRequestHandlersConfig = [ + {{- range .Values.handlers }} + { +{{ tpl . $ | indent 10 }} + } + {{- end }} + {{- range .Values.extraHandlers }} + { +{{ tpl . 
$ | indent 10 }} + } + {{- end }} + ] + {{- end }} + } \ No newline at end of file diff --git a/helm/templates/service.yaml b/helm/templates/service.yaml new file mode 100644 index 00000000..e48833d9 --- /dev/null +++ b/helm/templates/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }} + labels: + release: {{ .Release.Name }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: grpc-port + name: grpc-8090 + selector: + {{- toYaml .Values.serviceSelectorLabels | nindent 4 }} diff --git a/helm/values.yaml b/helm/values.yaml new file mode 100644 index 00000000..0e3238f5 --- /dev/null +++ b/helm/values.yaml @@ -0,0 +1,125 @@ +########### +# Deployment and Service +########### +replicaCount: 1 +maxUnavailable: 0 + +image: + repository: hypertrace/query-service + pullPolicy: IfNotPresent + +containerPort: 8090 +containerHealthProbePort: 8091 + +service: + type: ClusterIP + port: 8090 + +imagePullSecrets: {} + +nodeLabels: {} + +javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0" + +livenessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + +readinessProbe: + initialDelaySeconds: 2 + periodSeconds: 5 + +resources: + limits: + cpu: 2 + memory: 768Mi + requests: + cpu: 100m + memory: 768Mi + +deploymentLabels: + app: query-service + +podLabels: + app: query-service + +deploymentSelectorMatchLabels: + app: query-service + +serviceSelectorLabels: + app: query-service + +########### +# Config Maps +########### +queryServiceConfig: + name: query-service-config + data: + zookeeperConnectionString: zookeeper:2181/pinot/my-views + tenantColumnName: tenant_id + +handlers: + - |- + name = trace-view-handler + type = pinot + clientConfig = zookeeper + requestHandlerInfo = { + viewDefinition = { + viewName = rawTraceView + fieldMap = { + "TRACE.id": "trace_id", + "TRACE.name": "transaction_name", + "TRACE.startTime": "start_time_millis", + "TRACE.endTime":
"end_time_millis", + "TRACE.transactionName": "transaction_name", + "TRACE.services": "services", + "TRACE.duration": "duration_millis", + "TRACE.numServices": "num_services", + "TRACE.numSpans": "num_spans" + } + } + } + - |- + name = span-event-view-handler + type = pinot + clientConfig = zookeeper + requestHandlerInfo = { + viewDefinition = { + viewName = spanEventView + mapFields = ["tags"] + fieldMap = { + "EVENT.serviceId": "service_id", + "EVENT.serviceName" : "service_name", + "EVENT.apiId" : "api_id", + "EVENT.apiName" : "api_name", + "EVENT.apiTraceId" : "api_trace_id", + "EVENT.id" : "span_id", + "EVENT.startTime": "start_time_millis", + "EVENT.endTime": "end_time_millis", + "EVENT.traceId" : "trace_id", + "EVENT.parentSpanId" : "parent_span_id", + "EVENT.type" : "span_kind", + "EVENT.entryApiId": "entry_api_id", + "EVENT.protocolName": "protocol_name", + "EVENT.statusCode": "status_code", + "EVENT.spanTags" : "tags" + "EVENT.spanRequestUrl" : "request_url", + "EVENT.duration": "duration_millis", + "EVENT.displayEntityName": "display_entity_name", + "EVENT.displaySpanName": "display_span_name", + "EVENT.errorCount": "error_count", + "EVENT.exceptionCount": "exception_count" + } + } + } + +extraHandlers: [] + +logConfig: + name: query-service-log-appender-config + monitorInterval: 30 + rootLogger: + level: INFO + appender: + rolling: + enabled: false diff --git a/query-service-api/README.md b/query-service-api/README.md new file mode 100644 index 00000000..7c5aa702 --- /dev/null +++ b/query-service-api/README.md @@ -0,0 +1,12 @@ +## Generating Golang Client with GRPC support +The client currently can be generated locally by changing following properties in build.gradle.kts: +```kotlin +val generateLocalGoGrpcFiles = true + +path = "/bin/protoc-gen-go" + +``` + +Next run ../gradlew clean build + +The go files are generated in build/generated/source/proto/main/*go directories. 
diff --git a/query-service-api/build.gradle.kts b/query-service-api/build.gradle.kts new file mode 100644 index 00000000..5a0b2e7c --- /dev/null +++ b/query-service-api/build.gradle.kts @@ -0,0 +1,73 @@ +import com.google.protobuf.gradle.* + +plugins { + `java-library` + id("com.google.protobuf") version "0.8.8" + id("org.hypertrace.publish-plugin") + id("org.hypertrace.jacoco-report-plugin") +} + +val generateLocalGoGrpcFiles = false + +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:3.12.3" + } + plugins { + // Optional: an artifact spec for a protoc plugin, with "grpc" as + // the identifier, which can be referred to in the "plugins" + // container of the "generateProtoTasks" closure. + id("grpc_java") { + artifact = "io.grpc:protoc-gen-grpc-java:1.30.2" + } + + if (generateLocalGoGrpcFiles) { + id("grpc_go") { + path = "/bin/protoc-gen-go" + } + } + } + generateProtoTasks { + ofSourceSet("main").forEach { + it.plugins { + // Apply the "grpc" plugin whose spec is defined above, without options. 
+ id("grpc_java") + + if (generateLocalGoGrpcFiles) { + id("grpc_go") + } + } + it.builtins { + java + + if (generateLocalGoGrpcFiles) { + id("go") + } + } + } + } +} + +sourceSets { + main { + java { + srcDirs("src/main/java", "build/generated/source/proto/main/java", "build/generated/source/proto/main/grpc_java") + } + + proto { + srcDirs("src/main/proto") + } + } +} + +tasks.test { + useJUnitPlatform() +} + +dependencies { + api("io.grpc:grpc-protobuf:1.30.2") + api("io.grpc:grpc-stub:1.30.2") + api("javax.annotation:javax.annotation-api:1.3.2") + + testImplementation("org.junit.jupiter:junit-jupiter:5.6.2") +} diff --git a/query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java b/query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java new file mode 100644 index 00000000..facaf0e2 --- /dev/null +++ b/query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java @@ -0,0 +1,191 @@ +package org.hypertrace.core.query.service.util; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.Function; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.OrderByExpression; +import org.hypertrace.core.query.service.api.SortOrder; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.api.ValueType; + +/** + * Utility methods to easily create {@link org.hypertrace.core.query.service.api.QueryRequest} its + * selections and filters. 
+ */ +public class QueryRequestUtil { + + public static Filter createTimeFilter(String columnName, Operator op, long value) { + ColumnIdentifier.Builder timeColumn = ColumnIdentifier.newBuilder().setColumnName(columnName); + Expression.Builder lhs = Expression.newBuilder().setColumnIdentifier(timeColumn); + + LiteralConstant.Builder constant = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder().setValueType(ValueType.STRING).setString(String.valueOf(value))); + Expression.Builder rhs = Expression.newBuilder().setLiteral(constant); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build(); + } + + public static Filter.Builder createBetweenTimesFilter( + String columnName, long lower, long higher) { + return Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(createTimeFilter(columnName, Operator.GE, lower)) + .addChildFilter(createTimeFilter(columnName, Operator.LT, higher)); + } + + /** Given a column name, creates and returns an expression to select count(columnName). */ + public static Expression.Builder createCountByColumnSelection(String... 
columnNames) { + Function.Builder count = + Function.newBuilder() + .setFunctionName("Count") + .addAllArguments( + Arrays.stream(columnNames) + .map( + columnName -> + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName(columnName)) + .build()) + .collect(Collectors.toList())); + return Expression.newBuilder().setFunction(count); + } + + public static Filter.Builder createColumnValueFilter( + String columnName, Operator operator, String value) { + ColumnIdentifier.Builder column = ColumnIdentifier.newBuilder().setColumnName(columnName); + Expression.Builder lhs = Expression.newBuilder().setColumnIdentifier(column); + + LiteralConstant.Builder constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setValueType(ValueType.STRING).setString(value)); + Expression.Builder rhs = Expression.newBuilder().setLiteral(constant); + return Filter.newBuilder().setLhs(lhs).setOperator(operator).setRhs(rhs); + } + + public static Filter.Builder createBooleanFilter(Operator operator, List<Filter> childFilters) { + return Filter.newBuilder().setOperator(operator).addAllChildFilter(childFilters); + } + + public static Filter.Builder createValueInFilter(String columnName, Collection<String> values) { + return createValuesOpFilter(columnName, values, Operator.IN); + } + + public static Expression.Builder createColumnExpression(String columnName) { + return Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName(columnName)); + } + + public static Expression.Builder createFunctionExpression( + String functionName, Expression...
expressions) { + Function.Builder functionBuilder = Function.newBuilder().setFunctionName(functionName); + for (Expression e : expressions) { + functionBuilder.addArguments(e); + } + + return Expression.newBuilder().setFunction(functionBuilder); + } + + public static Expression.Builder createStringLiteralExpression(String value) { + LiteralConstant.Builder constant = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder().setValueType(ValueType.STRING).setString(String.valueOf(value))); + Expression.Builder expression = Expression.newBuilder().setLiteral(constant); + + return expression; + } + + public static Expression.Builder createLongLiteralExpression(Long value) { + LiteralConstant.Builder constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setValueType(ValueType.LONG).setLong(value)); + Expression.Builder expression = Expression.newBuilder().setLiteral(constant); + + return expression; + } + + public static OrderByExpression.Builder createOrderByExpression( + String columnName, SortOrder order) { + return OrderByExpression.newBuilder() + .setOrder(order) + .setExpression(createColumnExpression(columnName)); + } + + public static Function.Builder createTimeColumnGroupByFunction( + String timeColumn, long periodSecs) { + return Function.newBuilder() + .setFunctionName("dateTimeConvert") + .addArguments(QueryRequestUtil.createColumnExpression(timeColumn)) + .addArguments( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue( + org.hypertrace.core.query.service.api.Value.newBuilder() + .setString("1:MILLISECONDS:EPOCH")))) + .addArguments( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue( + org.hypertrace.core.query.service.api.Value.newBuilder() + .setString("1:MILLISECONDS:EPOCH")))) + .addArguments( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue( + org.hypertrace.core.query.service.api.Value.newBuilder() + .setString(periodSecs + 
":SECONDS")))); + } + + public static Filter createValueEQFilter(List<String> idColumns, List<String> idColumnsValues) { + if (idColumns.size() != idColumnsValues.size()) { + throw new IllegalArgumentException( + String.format( + "Literal for composite id column doesn't have required number of values." + + " Invalid idColumnsValues:%s for idColumns:%s", + idColumnsValues, idColumns)); + } + List<Filter> childFilters = + IntStream.range(0, idColumnsValues.size()) + .mapToObj( + i -> + Filter.newBuilder() + .setLhs(createColumnExpression(idColumns.get(i))) + .setOperator(Operator.EQ) + .setRhs(getLiteralExpression(idColumnsValues.get(i))) + .build()) + .collect(Collectors.toList()); + return Filter.newBuilder().setOperator(Operator.AND).addAllChildFilter(childFilters).build(); + } + + private static Expression.Builder getLiteralExpression(String value) { + return Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString(value).setValueType(ValueType.STRING))); + } + + private static Filter.Builder createValuesOpFilter( + String columnName, Collection<String> values, Operator op) { + ColumnIdentifier.Builder column = ColumnIdentifier.newBuilder().setColumnName(columnName); + Expression.Builder lhs = Expression.newBuilder().setColumnIdentifier(column); + + LiteralConstant.Builder constant = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder().setValueType(ValueType.STRING_ARRAY).addAllStringArray(values)); + Expression.Builder rhs = Expression.newBuilder().setLiteral(constant); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs); + } +} diff --git a/query-service-api/src/main/proto/query-service.proto b/query-service-api/src/main/proto/query-service.proto new file mode 100644 index 00000000..1990a20d --- /dev/null +++ b/query-service-api/src/main/proto/query-service.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "org.hypertrace.core.query.service.api"; +option
java_outer_classname = "QueryServiceProto"; + + +package org.hypertrace.core.query.service; + +import "request.proto"; +import "response.proto"; + +service QueryService { + rpc execute (QueryRequest) returns (stream ResultSetChunk) { + } +} diff --git a/query-service-api/src/main/proto/request.proto b/query-service-api/src/main/proto/request.proto new file mode 100644 index 00000000..af963458 --- /dev/null +++ b/query-service-api/src/main/proto/request.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "org.hypertrace.core.query.service.api"; +option java_outer_classname = "QueryRequestProto"; + +package org.hypertrace.core.query.service; + +import "value.proto"; + +message Expression { + oneof value { + ColumnIdentifier columnIdentifier = 1; + LiteralConstant literal = 2; + Function function = 3; + OrderByExpression orderBy = 4; + } +} +message QueryRequest { + + repeated string source = 1; + Filter filter = 2; + repeated Expression selection = 3; + repeated Expression aggregation = 4; + repeated Expression groupBy = 5; + repeated OrderByExpression orderBy = 6; + + int32 limit = 7; + int32 offset = 8; + bool distinctSelections = 9; +} + +message Filter { + + Expression lhs = 1; + Operator operator = 2; + Expression rhs = 3; + repeated Filter childFilter = 4; +} + +enum Operator { + AND = 0; + OR = 1; + NOT = 2; + EQ = 3; + NEQ = 4; + IN = 5; + NOT_IN = 6; + RANGE = 7; + GT = 8; + LT = 9; + GE = 10; + LE = 11; + LIKE = 12; + CONTAINS_KEY = 13; + CONTAINS_KEYVALUE = 14; +} + + +message Function { + string functionName = 1; + repeated Expression arguments = 2; + string alias = 3; +} + +message LiteralConstant { + Value value = 1; +} + +message ColumnIdentifier { + + string columnName = 1; + string alias = 2; +} + +message OrderByExpression { + Expression expression = 1; + SortOrder order = 2; +} + +enum SortOrder { + ASC = 0; + DESC = 1; +} + + diff --git a/query-service-api/src/main/proto/response.proto 
b/query-service-api/src/main/proto/response.proto new file mode 100644 index 00000000..af59beaa --- /dev/null +++ b/query-service-api/src/main/proto/response.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "org.hypertrace.core.query.service.api"; +option java_outer_classname = "QueryResponseProto"; + +package org.hypertrace.core.query.service; + +import "value.proto"; + +message ColumnMetadata { + string column_name = 1; + ValueType value_type = 2; + //is the value of type array + bool is_repeated = 3; +} + +message ResultSetMetadata { + repeated ColumnMetadata column_metadata = 1; +} + +message ResultSetChunk { + int32 chunk_id = 1; + bool is_last_chunk = 3; + //only present in the first chunk + ResultSetMetadata result_set_metadata = 4; + repeated Row row = 5; + + //can be in any chunk. + bool hasError = 6; + string errorMessage = 7; +} + + +message Row { + repeated Value column = 1; +} + diff --git a/query-service-api/src/main/proto/value.proto b/query-service-api/src/main/proto/value.proto new file mode 100644 index 00000000..9f98cb8f --- /dev/null +++ b/query-service-api/src/main/proto/value.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "org.hypertrace.core.query.service.api"; +option java_outer_classname = "ValueProto"; + +package org.hypertrace.core.query.service; + +enum ValueType { + STRING = 0; + LONG = 1; + INT = 2; + FLOAT = 3; + DOUBLE = 4; + BYTES = 5; + BOOL = 6; + TIMESTAMP = 7; + STRING_ARRAY = 8; + LONG_ARRAY = 9; + INT_ARRAY = 10; + FLOAT_ARRAY = 11; + DOUBLE_ARRAY = 12; + BYTES_ARRAY = 13; + BOOLEAN_ARRAY = 14; + // assumes that key is always string + STRING_MAP = 15; +} + +message Value { + ValueType valueType = 1; + + string string = 3; + int64 long = 4; + int32 int = 5; + float float = 6; + double double = 7; + bytes bytes = 8; + bool boolean = 9; + sfixed64 timestamp = 15; + repeated string string_array = 16; + repeated int64 
long_array = 17; + repeated int32 int_array = 18; + repeated float float_array = 19; + repeated double double_array = 20; + repeated bytes bytes_array = 21; + repeated bool boolean_array = 22; + map<string, string> string_map = 23; +} diff --git a/query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java b/query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java new file mode 100644 index 00000000..204d182f --- /dev/null +++ b/query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java @@ -0,0 +1,54 @@ +package org.hypertrace.core.query.service.util; + +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.api.ValueType; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class QueryRequestUtilTest { + @Test + public void testCreateBetweenTimesFilter() { + Filter.Builder timeFilter = + QueryRequestUtil.createBetweenTimesFilter("API.startTime", 20L, 30L); + Assertions.assertEquals( + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter( + Filter.newBuilder() + .setLhs( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("API.startTime"))) + .setOperator(Operator.GE) + .setRhs( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder() + .setValueType(ValueType.STRING) + .setString("20"))))) + .addChildFilter( + Filter.newBuilder() + .setLhs( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("API.startTime"))) + .setOperator(Operator.LT) + .setRhs(
package org.hypertrace.core.query.service.client;

import io.grpc.Deadline;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.hypertrace.core.grpcutils.client.GrpcClientRequestContextUtil;
import org.hypertrace.core.grpcutils.client.RequestContextClientCallCredsProviderFactory;
import org.hypertrace.core.query.service.api.QueryRequest;
import org.hypertrace.core.query.service.api.QueryServiceGrpc;
import org.hypertrace.core.query.service.api.QueryServiceGrpc.QueryServiceBlockingStub;
import org.hypertrace.core.query.service.api.ResultSetChunk;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Blocking gRPC client for the QueryService. Builds a plaintext channel to the configured
 * host/port and attaches per-request tenant/context headers via call credentials.
 */
public class QueryServiceClient {
  private static final Logger LOG = LoggerFactory.getLogger(QueryServiceClient.class);

  /**
   * Since Pinot truncates the GroupBy results to 10, we need to set higher value when we need more
   * values than 10 or all results. We might need to increase it to even higher but starting with a
   * reasonably small value.
   */
  public static final int DEFAULT_QUERY_SERVICE_GROUP_BY_LIMIT = 10000;

  private final QueryServiceBlockingStub queryServiceClient;

  /**
   * Creates a client pointed at the host/port in the given config.
   *
   * <p>NOTE(review): the ManagedChannel is neither stored nor shut down, so this client cannot be
   * cleanly closed; presumably it lives for the process lifetime — confirm before reuse elsewhere.
   *
   * @param queryServiceConfig host/port of the query service endpoint
   */
  public QueryServiceClient(QueryServiceConfig queryServiceConfig) {
    ManagedChannel managedChannel =
        ManagedChannelBuilder.forAddress(
                queryServiceConfig.getQueryServiceHost(), queryServiceConfig.getQueryServicePort())
            .usePlaintext()
            .build();
    queryServiceClient =
        QueryServiceGrpc.newBlockingStub(managedChannel)
            .withCallCredentials(
                RequestContextClientCallCredsProviderFactory.getClientCallCredsProvider().get());
  }

  /**
   * Executes the query with the given request-context headers and per-call deadline.
   *
   * @param request the query to execute
   * @param context headers (e.g. tenant id) propagated to the service via the grpc context
   * @param timeoutMillis per-call deadline; the blocking iterator fails once it elapses
   * @return an iterator over the streamed result chunks (restored generic type: the raw
   *     declaration had lost its type parameters)
   */
  public Iterator<ResultSetChunk> executeQuery(
      QueryRequest request, Map<String, String> context, int timeoutMillis) {
    LOG.debug(
        "Sending query to query service with timeout: {}, and request: {}", timeoutMillis, request);
    return GrpcClientRequestContextUtil.executeWithHeadersContext(
        context,
        () ->
            queryServiceClient
                .withDeadline(Deadline.after(timeoutMillis, TimeUnit.MILLISECONDS))
                .execute(request));
  }
}
*/ +public class QueryServiceConfig { + private static final Logger LOG = LoggerFactory.getLogger(QueryServiceConfig.class); + + private final String queryServiceHost; + private final int queryServicePort; + + public QueryServiceConfig(Config config) { + LOG.info(config.toString()); + this.queryServiceHost = config.getString("host"); + this.queryServicePort = config.getInt("port"); + } + + public String getQueryServiceHost() { + return this.queryServiceHost; + } + + public int getQueryServicePort() { + return queryServicePort; + } +} diff --git a/query-service-impl/README.md b/query-service-impl/README.md new file mode 100644 index 00000000..35e4f3c4 --- /dev/null +++ b/query-service-impl/README.md @@ -0,0 +1,17 @@ +# Query Service + +Run it with: + +../gradlew run + +Test the client using integration test (this requires E2E platform setup with sample data): + +sh run-integration-tests.sh + +Sample result: +``` +[Test worker] INFO org.hypertrace.core.query.service.QueryClientTest - traceId: "G\025\235\255O\306i\270\225\332wL\036\231\245\234" +spanId: "+q\346|\2175\245\207" +process: "{service_name=frontend, tags=[]}" +operationName: "Sent.hipstershop.ProductCatalogService.ListProducts" +``` diff --git a/query-service-impl/build.gradle.kts b/query-service-impl/build.gradle.kts new file mode 100644 index 00000000..28ab1ad6 --- /dev/null +++ b/query-service-impl/build.gradle.kts @@ -0,0 +1,42 @@ +plugins { + `java-library` + jacoco + id("org.hypertrace.jacoco-report-plugin") +} + +tasks.test { + useJUnitPlatform() +} + +dependencies { + constraints { + implementation("com.fasterxml.jackson.core:jackson-databind:2.11.0") { + because("Deserialization of Untrusted Data [High Severity][https://snyk.io/vuln/SNYK-JAVA-COMFASTERXMLJACKSONCORE-561587] in com.fasterxml.jackson.core:jackson-databind@2.9.8\n" + + " used by org.apache.pinot:pinot-java-client") + } + implementation("io.netty:netty:3.10.3.Final") { + because("HTTP Request Smuggling [Medium 
Severity][https://snyk.io/vuln/SNYK-JAVA-IONETTY-473694] in io.netty:netty@3.9.6.Final\n" + + " introduced by org.apache.pinot:pinot-java-client") + } + implementation("org.apache.zookeeper:zookeeper:3.6.1") { + because("Authentication Bypass [High Severity][https://snyk.io/vuln/SNYK-JAVA-ORGAPACHEZOOKEEPER-32301] in org.apache.zookeeper:zookeeper@3.4.6\n" + + " introduced by org.apache.pinot:pinot-java-client") + } + implementation("commons-codec:commons-codec:1.13") { + because("Information Exposure [Low Severity][https://snyk.io/vuln/SNYK-JAVA-COMMONSCODEC-561518] in commons-codec:commons-codec@1.11" + + " introduced org.apache.httpcomponents:httpclient@4.5.12") + } + } + api(project(":query-service-api")) + implementation("org.hypertrace.core.grpcutils:grpc-context-utils:0.1.0") + implementation("org.apache.pinot:pinot-java-client:0.3.0") { + // We want to use log4j2 impl so exclude the log4j binding of slf4j + exclude("org.slf4j", "slf4j-log4j12") + } + implementation("org.slf4j:slf4j-api:1.7.30") + implementation("com.typesafe:config:1.3.2") + + testImplementation(project(":query-service-api")) + testImplementation("org.junit.jupiter:junit-jupiter:5.6.2") + testImplementation("org.mockito:mockito-core:3.3.3") +} diff --git a/query-service-impl/config.yml b/query-service-impl/config.yml new file mode 100644 index 00000000..0d047ff4 --- /dev/null +++ b/query-service-impl/config.yml @@ -0,0 +1,11 @@ +logging: + level: INFO + loggers: + org.hypertrace.core.query.service: INFO +server: + applicationConnectors: + - type: http + port: 8080 + adminConnectors: + - type: http + port: 8081 diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java new file mode 100644 index 00000000..575ee486 --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java @@ -0,0 +1,18 @@ +package 
org.hypertrace.core.query.service; + +/** + * Class to hold context for a query from the incoming request. We maintain a separate class for + * QueryService so that the context for this service can evolve independent from the platform + * RequestContext class. + */ +public class QueryContext { + private final String tenantId; + + public QueryContext(String tenantId) { + this.tenantId = tenantId; + } + + public String getTenantId() { + return tenantId; + } +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java new file mode 100644 index 00000000..86908a62 --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java @@ -0,0 +1,32 @@ +package org.hypertrace.core.query.service; + +public class QueryCost { + + /** + * Return the cost to evaluate the request. + * + * @return -1 means it cannot handle the request else 0 (super fast) to 1 very expensive + */ + double cost; + /** + * Allows the request handler to return additional context as part of RequestHandler.canHandle + * method in RequestHandler. 
This will be passed in to the RequestHandler.handleRequest + */ + Object context; + + public double getCost() { + return cost; + } + + public void setCost(double cost) { + this.cost = cost; + } + + public Object getContext() { + return context; + } + + public void setContext(Object context) { + this.context = context; + } +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java new file mode 100644 index 00000000..3b6af59e --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java @@ -0,0 +1,15 @@ +package org.hypertrace.core.query.service; + +/** Interface which is passed as a callback to {@link RequestHandler} */ +public interface QueryResultCollector { + + /** + * Collect and handle the response received in T. + * + * @param t One of the items in the result. + */ + void collect(T t); + + /** Finish collecting all the results and wrap up the query. 
package org.hypertrace.core.query.service;

import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.stream.Collectors;
import org.hypertrace.core.grpcutils.context.RequestContext;
import org.hypertrace.core.query.service.QueryServiceImplConfig.ClientConfig;
import org.hypertrace.core.query.service.QueryServiceImplConfig.RequestHandlerConfig;
import org.hypertrace.core.query.service.api.QueryRequest;
import org.hypertrace.core.query.service.api.QueryServiceGrpc;
import org.hypertrace.core.query.service.api.ResultSetChunk;
import org.hypertrace.core.query.service.pinot.PinotBasedRequestHandler;
import org.hypertrace.core.query.service.pinot.PinotClientFactory;
import org.hypertrace.core.query.service.pinot.ViewDefinition;

/**
 * gRPC implementation of the QueryService. At construction it registers the configured request
 * handlers (currently Pinot only) and their clients; at request time it picks the cheapest capable
 * handler and streams result chunks back to the caller.
 */
public class QueryServiceImpl extends QueryServiceGrpc.QueryServiceImplBase {

  private static final org.slf4j.Logger LOG =
      org.slf4j.LoggerFactory.getLogger(QueryServiceImpl.class);

  private final RequestHandlerSelector selector;

  /**
   * @param config parsed service config; must define a non-blank tenant column name, the client
   *     configs and at least the handler configs to register
   * @throws RuntimeException if the tenant column name is missing or blank
   * @throws UnsupportedOperationException if a handler config has an unknown type
   */
  public QueryServiceImpl(QueryServiceImplConfig config) {
    // Index client configs by type so each handler can look up its connection details.
    Map<String, ClientConfig> clientConfigMap =
        config.getClients().stream()
            .map(ClientConfig::parse)
            .collect(Collectors.toMap(ClientConfig::getType, clientConfig -> clientConfig));
    String tenantColumnName = config.getTenantColumnName();

    if (tenantColumnName == null || tenantColumnName.isBlank()) {
      throw new RuntimeException(
          "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config.");
    }

    for (Config requestHandlerConfig : config.getQueryRequestHandlersConfig()) {
      initRequestHandler(
          RequestHandlerConfig.parse(requestHandlerConfig), clientConfigMap, tenantColumnName);
    }
    selector = new RequestHandlerSelector(RequestHandlerRegistry.get());
  }

  /**
   * Registers one handler (and its backing client) from config. Only the "pinot" handler type is
   * supported today.
   */
  private void initRequestHandler(
      RequestHandlerConfig config,
      Map<String, ClientConfig> clientConfigMap,
      String tenantColumnName) {

    // Register Pinot RequestHandler
    if ("pinot".equals(config.getType())) {
      Map<String, Object> requestHandlerInfoConf = new HashMap<>();
      // The handler-info map is untyped config data; the cast is isolated here so the
      // unchecked suppression stays as narrow as possible.
      @SuppressWarnings("unchecked")
      Map<String, Object> viewDefinitionConfig =
          (Map<String, Object>)
              config
                  .getRequestHandlerInfo()
                  .get(PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY);
      requestHandlerInfoConf.put(
          PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY,
          ViewDefinition.parse(viewDefinitionConfig, tenantColumnName));
      RequestHandlerRegistry.get()
          .register(
              config.getName(),
              new RequestHandlerInfo(
                  config.getName(), PinotBasedRequestHandler.class, requestHandlerInfoConf));
    } else {
      throw new UnsupportedOperationException(
          "Unsupported RequestHandler type - " + config.getType());
    }

    // Register Pinot Client
    ClientConfig clientConfig = clientConfigMap.get(config.getClientConfig());
    Preconditions.checkNotNull(clientConfig);
    PinotClientFactory.createPinotClient(
        config.getName(), clientConfig.getType(), clientConfig.getConnectionString());
  }

  /**
   * Analyzes the request, selects a handler by cost and streams result chunks to the observer.
   * Missing tenant id or any handler failure is reported via {@code onError}.
   */
  @Override
  public void execute(
      QueryRequest queryRequest, StreamObserver<ResultSetChunk> responseObserver) {
    try {
      RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
      analyzer.analyze();
      RequestHandler requestHandler = selector.select(queryRequest, analyzer);
      if (requestHandler == null) {
        // An error is logged in the select() method
        responseObserver.onError(
            Status.NOT_FOUND
                .withDescription("Could not find any handler to handle the request")
                .asException());
        return;
      }

      ResultSetChunkCollector collector = new ResultSetChunkCollector(responseObserver);
      collector.init(analyzer.getResultSetMetadata());

      // Optional.get() intentionally unguarded: absence is surfaced as NoSuchElementException
      // and handled below as a missing-tenant error.
      String tenantId = RequestContext.CURRENT.get().getTenantId().get();
      requestHandler.handleRequest(new QueryContext(tenantId), queryRequest, collector, analyzer);
    } catch (NoSuchElementException e) {
      LOG.error("TenantId is missing in the context.", e);
      responseObserver.onError(e);
    } catch (Exception e) {
      LOG.error("Error processing request: {}", queryRequest, e);
      responseObserver.onError(e);
    }
  }
}
package org.hypertrace.core.query.service;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.hypertrace.core.query.service.api.ColumnIdentifier;
import org.hypertrace.core.query.service.api.ColumnMetadata;
import org.hypertrace.core.query.service.api.Expression;
import org.hypertrace.core.query.service.api.Expression.ValueCase;
import org.hypertrace.core.query.service.api.Filter;
import org.hypertrace.core.query.service.api.Function;
import org.hypertrace.core.query.service.api.OrderByExpression;
import org.hypertrace.core.query.service.api.QueryRequest;
import org.hypertrace.core.query.service.api.ResultSetMetadata;
import org.hypertrace.core.query.service.api.ValueType;

/**
 * Walks a {@link QueryRequest} once ({@link #analyze()}) and derives: the set of all referenced
 * column names, the ordered selection expressions, and the result-set metadata whose column order
 * matches the selections (group bys first, then selections, then aggregations).
 */
public class RequestAnalyzer {

  private final QueryRequest request;
  // Every column name referenced anywhere in the request: filters, group bys,
  // selections and aggregation arguments. Populated by analyze().
  private Set<String> referencedColumns;
  // Column names from the selection list only, in selection order.
  private final LinkedHashSet<String> selectedColumns;
  private ResultSetMetadata resultSetMetadata;
  // Contains all selections to be made in the DB: selections on group by, single columns and
  // aggregations in that order. There should be a one-to-one mapping between this and the
  // columnMetadataSet in ResultSetMetadata. The difference between this and selectedColumns above
  // is that this is a set of Expressions while selectedColumns is a set of column names.
  private final LinkedHashSet<Expression> allSelections;

  public RequestAnalyzer(QueryRequest request) {
    this.request = request;
    this.selectedColumns = new LinkedHashSet<>();
    this.allSelections = new LinkedHashSet<>();
  }

  /**
   * Performs the single pass over the request. Must be called before any getter; the getters
   * return null/empty collections until then.
   */
  public void analyze() {
    // Breadth-first walk of the filter tree; only leaf filters carry column references.
    List<String> filterColumns = new ArrayList<>();
    LinkedList<Filter> filterQueue = new LinkedList<>();
    filterQueue.add(request.getFilter());
    while (!filterQueue.isEmpty()) {
      Filter filter = filterQueue.pop();
      if (filter.getChildFilterCount() > 0) {
        filterQueue.addAll(filter.getChildFilterList());
      } else {
        extractColumns(filterColumns, filter.getLhs());
        extractColumns(filterColumns, filter.getRhs());
      }
    }

    List<String> postFilterColumns = new ArrayList<>();
    List<String> selectedList = new ArrayList<>();
    LinkedHashSet<ColumnMetadata> columnMetadataSet = new LinkedHashSet<>();

    // group by columns must be first in the response
    for (Expression expression : request.getGroupByList()) {
      extractColumns(postFilterColumns, expression);
      columnMetadataSet.add(toColumnMetadata(expression));
      allSelections.add(expression);
    }
    for (Expression expression : request.getSelectionList()) {
      extractColumns(selectedList, expression);
      columnMetadataSet.add(toColumnMetadata(expression));
      allSelections.add(expression);
    }
    // Fix: the original added the whole accumulated selectedList to postFilterColumns on every
    // loop iteration (quadratic duplicates). Adding it once afterwards produces the identical
    // referencedColumns set, which is the only consumer of postFilterColumns.
    postFilterColumns.addAll(selectedList);
    for (Expression expression : request.getAggregationList()) {
      extractColumns(postFilterColumns, expression);
      columnMetadataSet.add(toColumnMetadata(expression));
      allSelections.add(expression);
    }

    referencedColumns = new HashSet<>();
    referencedColumns.addAll(filterColumns);
    referencedColumns.addAll(postFilterColumns);
    resultSetMetadata =
        ResultSetMetadata.newBuilder().addAllColumnMetadata(columnMetadataSet).build();
    selectedColumns.addAll(selectedList);
  }

  /**
   * Builds the result column metadata for one selection expression: the alias when set, otherwise
   * the column/function name. Value type is always reported as STRING here.
   */
  private ColumnMetadata toColumnMetadata(Expression expression) {
    ColumnMetadata.Builder builder = ColumnMetadata.newBuilder();
    ValueCase valueCase = expression.getValueCase();
    switch (valueCase) {
      case COLUMNIDENTIFIER:
        ColumnIdentifier columnIdentifier = expression.getColumnIdentifier();
        String columnAlias = columnIdentifier.getAlias();
        if (columnAlias != null && columnAlias.trim().length() > 0) {
          builder.setColumnName(columnAlias);
        } else {
          builder.setColumnName(columnIdentifier.getColumnName());
        }
        builder.setValueType(ValueType.STRING);
        builder.setIsRepeated(false);
        break;
      case LITERAL:
        break;
      case FUNCTION:
        Function function = expression.getFunction();
        String functionAlias = function.getAlias();
        if (functionAlias != null && functionAlias.trim().length() > 0) {
          builder.setColumnName(functionAlias);
        } else {
          // todo: handle recursive functions max(rollup(time,50)
          // workaround is to use alias for now
          builder.setColumnName(function.getFunctionName());
        }
        builder.setValueType(ValueType.STRING);
        builder.setIsRepeated(false);
        break;
      case ORDERBY:
        break;
      case VALUE_NOT_SET:
        break;
    }
    return builder.build();
  }

  /** Recursively collects every column name referenced by the expression into {@code columns}. */
  private void extractColumns(List<String> columns, Expression expression) {
    ValueCase valueCase = expression.getValueCase();
    switch (valueCase) {
      case COLUMNIDENTIFIER:
        columns.add(expression.getColumnIdentifier().getColumnName());
        break;
      case LITERAL:
        // no columns
        break;
      case FUNCTION:
        for (Expression childExpression : expression.getFunction().getArgumentsList()) {
          extractColumns(columns, childExpression);
        }
        break;
      case ORDERBY:
        OrderByExpression orderBy = expression.getOrderBy();
        extractColumns(columns, orderBy.getExpression());
        break;
      case VALUE_NOT_SET:
        break;
    }
  }

  /** All column names referenced anywhere in the request; null until analyze() runs. */
  public Set<String> getReferencedColumns() {
    return referencedColumns;
  }

  /** Metadata matching the selection order; null until analyze() runs. */
  public ResultSetMetadata getResultSetMetadata() {
    return resultSetMetadata;
  }

  /** Selection-list column names in order; empty until analyze() runs. */
  public LinkedHashSet<String> getSelectedColumns() {
    return selectedColumns;
  }

  /** Group-by, selection and aggregation expressions, in response order. */
  public LinkedHashSet<Expression> getAllSelections() {
    return this.allSelections;
  }
}
package org.hypertrace.core.query.service;

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.LoggerFactory;

/**
 * Process-wide singleton registry of {@link RequestHandlerInfo} keyed by handler name.
 *
 * <p>The backing map is a ConcurrentHashMap and registration uses an atomic putIfAbsent, so the
 * original check-then-act race (and the package-visible mutable HashMap field) is gone.
 */
public class RequestHandlerRegistry {

  private static final org.slf4j.Logger LOG =
      LoggerFactory.getLogger(RequestHandlerRegistry.class);

  private static final RequestHandlerRegistry INSTANCE = new RequestHandlerRegistry();

  private final Map<String, RequestHandlerInfo> requestHandlerInfoMap = new ConcurrentHashMap<>();

  private RequestHandlerRegistry() {}

  /**
   * Registers the handler info under the given name.
   *
   * @return true if registered; false (with an error log) if the name was already taken
   */
  public boolean register(String handlerName, RequestHandlerInfo requestHandlerInfo) {
    if (requestHandlerInfoMap.putIfAbsent(handlerName, requestHandlerInfo) != null) {
      LOG.error("RequestHandlerInfo registration failed. Duplicate Handler:{} ", handlerName);
      return false;
    }
    return true;
  }

  /** Returns all registered handler infos (a live view of the registry's values). */
  public Collection<RequestHandlerInfo> getAll() {
    return requestHandlerInfoMap.values();
  }

  /** Returns the singleton instance. */
  public static RequestHandlerRegistry get() {
    return INSTANCE;
  }
}
package org.hypertrace.core.query.service;

import io.grpc.stub.StreamObserver;
import java.io.PrintWriter;
import java.io.StringWriter;
import org.hypertrace.core.query.service.api.ResultSetChunk;
import org.hypertrace.core.query.service.api.ResultSetMetadata;
import org.hypertrace.core.query.service.api.Row;

/**
 * Collects result rows and streams them to a gRPC observer in fixed-size {@link ResultSetChunk}s.
 * The result-set metadata is sent only on the first chunk (the builder is cleared after each
 * flush); the final chunk carries {@code isLastChunk = true}.
 */
public class ResultSetChunkCollector implements QueryResultCollector<Row> {

  // Made final: this is a constant, it was previously a mutable static.
  private static final int DEFAULT_CHUNK_SIZE = 10000; // 10k rows

  private final StreamObserver<ResultSetChunk> grpcObserver;
  private final int maxChunkSize;
  private int currentChunkSize;
  private int chunkId;
  private final ResultSetChunk.Builder currentBuilder;

  public ResultSetChunkCollector(StreamObserver<ResultSetChunk> grpcObserver) {
    this(grpcObserver, DEFAULT_CHUNK_SIZE);
  }

  /**
   * @param grpcObserver sink for the chunks; never completed until {@link #finish()}
   * @param chunkSize number of rows per emitted chunk
   */
  public ResultSetChunkCollector(StreamObserver<ResultSetChunk> grpcObserver, int chunkSize) {
    this.grpcObserver = grpcObserver;
    this.maxChunkSize = chunkSize;
    this.chunkId = 0;
    this.currentBuilder = ResultSetChunk.newBuilder();
    currentBuilder.setChunkId(this.chunkId);
  }

  /** Attaches the result metadata; it will ride on the first chunk only. */
  public void init(ResultSetMetadata metadata) {
    currentBuilder.setResultSetMetadata(metadata);
  }

  /** Buffers one row; flushes a chunk downstream once maxChunkSize rows have accumulated. */
  public void collect(Row row) {
    currentBuilder.addRow(row);
    currentChunkSize++;
    if (currentChunkSize >= maxChunkSize) {
      ResultSetChunk resultSetChunk = currentBuilder.build();
      grpcObserver.onNext(resultSetChunk);
      // clear() also drops the metadata, so only the first chunk carries it.
      currentBuilder.clear();
      chunkId = chunkId + 1;
      currentChunkSize = 0;
      currentBuilder.setChunkId(chunkId);
    }
  }

  /**
   * Emits a final chunk flagged with the error and its stack trace.
   *
   * <p>NOTE(review): this sends onNext but never onCompleted/onError, so the stream is left open;
   * presumably the caller terminates it — confirm against the call sites.
   */
  public void error(Throwable t) {
    currentBuilder.setIsLastChunk(true);
    currentBuilder.setHasError(true);
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    t.printStackTrace(pw);
    currentBuilder.setErrorMessage(sw.toString());
    grpcObserver.onNext(currentBuilder.build());
  }

  /** Flushes remaining rows as the last chunk and completes the stream. */
  public void finish() {
    // NOTE: Always send a one ResultChunk with isLastChunk = true
    currentBuilder.setIsLastChunk(true);
    ResultSetChunk resultSetChunk = currentBuilder.build();
    grpcObserver.onNext(resultSetChunk);
    grpcObserver.onCompleted();
  }
}
org.hypertrace.core.query.service.pinot; + +import java.util.Map; +import javax.annotation.concurrent.NotThreadSafe; +import org.apache.pinot.client.ResultSetGroup; + +/* + * AdhocPinotQuery could take any Pinot query and return ResultSetGroup which is the raw Pinot + * Response. + */ +@NotThreadSafe +public class AdhocPinotQuery extends PinotQuery { + + private String query; + + public AdhocPinotQuery(String name, PinotClientFactory.PinotClient pinotClient) { + super(name, pinotClient); + } + + @Override + public String getQuery(Map args) { + return this.query; + } + + public void setQuery(String query) { + this.query = query; + } + + @Override + ResultSetGroup convertQueryResults(ResultSetGroup queryResults) { + return queryResults; + } + + @Override + public int hashCode() { + int hash = super.hashCode(); + hash = 31 * hash + (query == null ? 0 : query.hashCode()); + return hash; + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + if (this.getClass() != o.getClass()) { + return false; + } + AdhocPinotQuery apq = (AdhocPinotQuery) o; + return (this.query.equals(apq.query)); + } + return false; + } +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java new file mode 100644 index 00000000..42f0dea8 --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java @@ -0,0 +1,15 @@ +package org.hypertrace.core.query.service.pinot; + +import org.apache.pinot.client.ResultSet; + +public class DefaultResultSetTypePredicateProvider implements ResultSetTypePredicateProvider { + @Override + public boolean isSelectionResultSetType(ResultSet resultSet) { + return resultSet.getClass().getName().contains("SelectionResultSet"); + } + + @Override + public boolean 
import java.util.HashMap;
import java.util.Map;

/**
 * Holds the positional parameters that need to be set on the Pinot PreparedStatement when
 * constructing the final PQL query.
 *
 * <p>Each map is keyed by the zero-based parameter index within the statement. A single shared
 * index counter is used across all value types, so indices never collide and reflect the order
 * in which parameters were added.
 */
public class Params {

  // Map of statement parameter index to the corresponding typed value.
  // Fields are final: a built Params is effectively immutable.
  private final Map<Integer, Integer> integerParams;
  private final Map<Integer, Long> longParams;
  private final Map<Integer, String> stringParams;
  private final Map<Integer, Float> floatParams;
  private final Map<Integer, Double> doubleParams;

  private Params(
      Map<Integer, Integer> integerParams,
      Map<Integer, Long> longParams,
      Map<Integer, String> stringParams,
      Map<Integer, Float> floatParams,
      Map<Integer, Double> doubleParams) {
    this.integerParams = integerParams;
    this.longParams = longParams;
    this.stringParams = stringParams;
    this.floatParams = floatParams;
    this.doubleParams = doubleParams;
  }

  public Map<Integer, Integer> getIntegerParams() {
    return integerParams;
  }

  public Map<Integer, Long> getLongParams() {
    return longParams;
  }

  public Map<Integer, String> getStringParams() {
    return stringParams;
  }

  public Map<Integer, Float> getFloatParams() {
    return floatParams;
  }

  public Map<Integer, Double> getDoubleParams() {
    return doubleParams;
  }

  public static Builder newBuilder() {
    return new Builder();
  }

  /** Accumulates typed parameters, assigning each one the next statement index. */
  public static class Builder {
    // Next statement index to assign; shared across all typed add* methods.
    private int nextIndex;
    private final Map<Integer, Integer> integerParams;
    private final Map<Integer, Long> longParams;
    private final Map<Integer, String> stringParams;
    private final Map<Integer, Float> floatParams;
    private final Map<Integer, Double> doubleParams;

    private Builder() {
      nextIndex = 0;
      integerParams = new HashMap<>();
      longParams = new HashMap<>();
      stringParams = new HashMap<>();
      floatParams = new HashMap<>();
      doubleParams = new HashMap<>();
    }

    public Builder addIntegerParam(int paramValue) {
      integerParams.put(nextIndex++, paramValue);
      return this;
    }

    public Builder addLongParam(long paramValue) {
      longParams.put(nextIndex++, paramValue);
      return this;
    }

    public Builder addStringParam(String paramValue) {
      stringParams.put(nextIndex++, paramValue);
      return this;
    }

    public Builder addFloatParam(float paramValue) {
      floatParams.put(nextIndex++, paramValue);
      return this;
    }

    public Builder addDoubleParam(double paramValue) {
      doubleParams.put(nextIndex++, paramValue);
      return this;
    }

    public Params build() {
      return new Params(integerParams, longParams, stringParams, floatParams, doubleParams);
    }
  }
}
package org.hypertrace.core.query.service.pinot;

import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.pinot.client.ResultSet;
import org.apache.pinot.client.ResultSetGroup;
import org.hypertrace.core.query.service.QueryContext;
import org.hypertrace.core.query.service.QueryCost;
import org.hypertrace.core.query.service.QueryResultCollector;
import org.hypertrace.core.query.service.RequestAnalyzer;
import org.hypertrace.core.query.service.RequestHandler;
import org.hypertrace.core.query.service.api.QueryRequest;
import org.hypertrace.core.query.service.api.Row;
import org.hypertrace.core.query.service.api.Row.Builder;
import org.hypertrace.core.query.service.api.Value;
import org.hypertrace.core.query.service.pinot.PinotClientFactory.PinotClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A {@link RequestHandler} that serves a QueryRequest out of a single Pinot view: it converts the
 * request to PQL, executes it through the shared {@link PinotClientFactory}, and converts the
 * Pinot response (selection, aggregation/group-by, or table format) into {@link Row}s pushed into
 * a {@link QueryResultCollector}.
 */
public class PinotBasedRequestHandler implements RequestHandler {

  private static final Logger LOG = LoggerFactory.getLogger(PinotBasedRequestHandler.class);

  // NOTE(review): public, static and non-final — any caller can reassign this key; consider
  // making it final.
  public static String VIEW_DEFINITION_CONFIG_KEY = "viewDefinition";
  private static final int SLOW_REQUEST_THRESHOLD_MS = 3000; // A 3 seconds request is too slow

  // Handler name and view mapping; both are assigned in init(), not at construction time.
  private String name;
  private ViewDefinition viewDefinition;
  private QueryRequestToPinotSQLConverter request2PinotSqlConverter;
  private final PinotMapConverter pinotMapConverter;
  // The implementations of ResultSet are package private and hence there's no way to determine the
  // shape of the results
  // other than to do string comparison on the simple class names. In order to be able to unit test
  // the logic for
  // parsing the Pinot response we need to be able to mock out the ResultSet interface and hence we
  // create an interface
  // for the logic to determine the handling function based in the ResultSet class name. See usages
  // of resultSetTypePredicateProvider
  // to see how it used.
  private final ResultSetTypePredicateProvider resultSetTypePredicateProvider;
  private final PinotClientFactory pinotClientFactory;

  public PinotBasedRequestHandler() {
    this(new DefaultResultSetTypePredicateProvider(), PinotClientFactory.get());
  }

  // Package-private constructor for unit tests: injects the ResultSet-type predicate and factory.
  PinotBasedRequestHandler(
      ResultSetTypePredicateProvider resultSetTypePredicateProvider,
      PinotClientFactory pinotClientFactory) {
    this.resultSetTypePredicateProvider = resultSetTypePredicateProvider;
    this.pinotClientFactory = pinotClientFactory;
    this.pinotMapConverter = new PinotMapConverter();
  }

  @Override
  public String getName() {
    return name;
  }

  /**
   * Initializes the handler with its name and config. The config must carry a ViewDefinition
   * under VIEW_DEFINITION_CONFIG_KEY; it also seeds the PQL converter.
   */
  @Override
  public void init(String name, Map config) {
    this.name = name;
    // TODO:use typesafe HOCON object
    this.viewDefinition = (ViewDefinition) config.get(VIEW_DEFINITION_CONFIG_KEY);
    request2PinotSqlConverter = new QueryRequestToPinotSQLConverter(viewDefinition);
  }

  /**
   * Returns a cost of 0.5 when every referenced column exists in this handler's view, and a
   * negative cost (-1, i.e. cannot handle) otherwise.
   */
  @Override
  public QueryCost canHandle(
      QueryRequest request, Set referencedSources, Set referencedColumns) {
    double cost = -1;
    boolean found = true;
    for (String referencedColumn : referencedColumns) {
      if (!viewDefinition.containsColumn(referencedColumn)) {
        found = false;
        break;
      }
    }
    // successfully found a view that can handle the request
    if (found) {
      // TODO: Come up with a way to compute the cost based on request and view definition
      // Higher columns --> Higher cost,
      // Finer the time granularity --> Higher the cost.
      cost = 0.5;
    }
    QueryCost queryCost = new QueryCost();
    queryCost.setCost(cost);
    return queryCost;
  }

  /**
   * Validates, converts to PQL, executes against Pinot and streams the converted rows into the
   * collector. Slow queries (over SLOW_REQUEST_THRESHOLD_MS) are logged at WARN; any execution
   * failure is logged with the offending PQL and rethrown for the caller to surface.
   */
  @Override
  public void handleRequest(
      QueryContext queryContext,
      QueryRequest request,
      QueryResultCollector collector,
      RequestAnalyzer requestAnalyzer) {
    long start = System.currentTimeMillis();
    validateQueryRequest(queryContext, request);
    // pql is a pair of (PQL string, positional Params for the prepared statement).
    Entry pql =
        request2PinotSqlConverter.toSQL(queryContext, request, requestAnalyzer.getAllSelections());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Trying to execute PQL: [ {} ] by RequestHandler: [ {} ]", pql, this.getName());
    }
    final PinotClient pinotClient = pinotClientFactory.getPinotClient(this.getName());
    try {
      final ResultSetGroup resultSetGroup = pinotClient.executeQuery(pql.getKey(), pql.getValue());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Query results: [ {} ]", resultSetGroup.toString());
      }
      // need to merge data especially for Pinot. That's why we need to track the map columns
      convert(resultSetGroup, collector, requestAnalyzer.getSelectedColumns());
      long requestTimeMs = System.currentTimeMillis() - start;
      if (requestTimeMs > SLOW_REQUEST_THRESHOLD_MS) {
        LOG.warn("Query Execution time: {} millis\nQuery Request: {}", requestTimeMs, request);
      }
    } catch (Exception ex) {
      // Catch this exception to log the Pinot SQL query that caused the issue
      LOG.error("An error occurred while executing: {}", pql.getKey(), ex);
      // Rethrow for the caller to return an error.
      throw ex;
    }
  }

  /**
   * Dispatches the Pinot response to the matching row-conversion routine based on the runtime
   * ResultSet type (selection vs. table-format vs. aggregation/group-by), then pushes every built
   * row into the collector and finishes it. Package-private for unit testing.
   */
  void convert(
      ResultSetGroup resultSetGroup,
      QueryResultCollector collector,
      LinkedHashSet selectedAttributes) {
    List rowBuilderList = new ArrayList<>();
    if (resultSetGroup.getResultSetCount() > 0) {
      ResultSet resultSet = resultSetGroup.getResultSet(0);
      // Pinot has different Response format for selection and aggregation/group by query.
      if (resultSetTypePredicateProvider.isSelectionResultSetType(resultSet)) {
        // map merging is only supported in the selection. Filtering and Group by has its own
        // syntax in Pinot
        handleSelection(resultSetGroup, rowBuilderList, selectedAttributes);
      } else if (resultSetTypePredicateProvider.isResultTableResultSetType(resultSet)) {
        handleTableFormatResultSet(resultSetGroup, rowBuilderList);
      } else {
        handleAggregationAndGroupBy(resultSetGroup, rowBuilderList);
      }
    }
    for (Row.Builder builder : rowBuilderList) {
      final Row row = builder.build();
      LOG.debug("collect a row: {}", row);
      collector.collect(row);
    }
    collector.finish();
  }

  /**
   * Converts a selection-style response: for every Pinot row, emits one Row whose columns follow
   * the request's selected attributes (map attributes are merged by PinotResultAnalyzer).
   */
  private void handleSelection(
      ResultSetGroup resultSetGroup,
      List rowBuilderList,
      LinkedHashSet selectedAttributes) {
    int resultSetGroupCount = resultSetGroup.getResultSetCount();
    for (int i = 0; i < resultSetGroupCount; i++) {
      ResultSet resultSet = resultSetGroup.getResultSet(i);
      // Find the index in the result's column for each selected attributes
      PinotResultAnalyzer resultAnalyzer =
          PinotResultAnalyzer.create(resultSet, selectedAttributes, viewDefinition);

      // For each row returned from Pinot,
      // build the row according to the selected attributes from the request
      for (int rowId = 0; rowId < resultSet.getRowCount(); rowId++) {
        Builder builder;
        builder = Row.newBuilder();
        rowBuilderList.add(builder);

        // for each selected attributes in the request get the data from the
        // Pinot row result
        for (String logicalName : selectedAttributes) {
          // colVal will never be null. But getDataRow can throw a runtime exception if it failed
          // to retrieve data
          String colVal = resultAnalyzer.getDataFromRow(rowId, logicalName);
          builder.addColumn(Value.newBuilder().setString(colVal).build());
        }
      }
    }
  }

  /**
   * Converts an aggregation/group-by response. Rows from different result sets that share the
   * same group key ("|"-joined group-key values) are merged into a single output Row: group-key
   * columns are emitted once, then each result set appends its aggregate column values.
   */
  private void handleAggregationAndGroupBy(
      ResultSetGroup resultSetGroup, List rowBuilderList) {
    int resultSetGroupCount = resultSetGroup.getResultSetCount();
    // Maps a group key to the index of its Row builder in rowBuilderList.
    Map groupKey2RowIdMap = new HashMap<>();
    for (int i = 0; i < resultSetGroupCount; i++) {
      ResultSet resultSet = resultSetGroup.getResultSet(i);
      for (int rowId = 0; rowId < resultSet.getRowCount(); rowId++) {
        Builder builder;
        //
        int groupKeyLength = resultSet.getGroupKeyLength();
        String groupKey;
        StringBuilder groupKeyBuilder = new StringBuilder();
        String groupKeyDelim = "";
        for (int g = 0; g < groupKeyLength; g++) {
          String colVal = resultSet.getGroupKeyString(rowId, g);
          groupKeyBuilder.append(groupKeyDelim).append(colVal);
          groupKeyDelim = "|";
        }
        groupKey = groupKeyBuilder.toString();
        if (!groupKey2RowIdMap.containsKey(groupKey)) {
          builder = Row.newBuilder();
          rowBuilderList.add(builder);
          groupKey2RowIdMap.put(groupKey, rowId);
          for (int g = 0; g < groupKeyLength; g++) {
            String colVal = resultSet.getGroupKeyString(rowId, g);
            // add it only the first time
            builder.addColumn(Value.newBuilder().setString(colVal).build());
            // NOTE(review): this append to groupKeyBuilder appears to be dead code — groupKey was
            // already computed above and groupKeyBuilder is not read again. Confirm and remove.
            groupKeyBuilder.append(colVal).append(groupKeyDelim);
            groupKeyDelim = "|";
          }
        } else {
          builder = rowBuilderList.get(groupKey2RowIdMap.get(groupKey));
        }
        int columnCount = resultSet.getColumnCount();
        if (columnCount > 0) {
          for (int c = 0; c < columnCount; c++) {
            String colVal = resultSet.getString(rowId, c);
            builder.addColumn(Value.newBuilder().setString(colVal).build());
          }
        }
      }
    }
  }

  /**
   * Converts a table-format (SQL-style) response. Adjacent map-keys / map-values column pairs
   * (identified by ViewDefinition.MAP_KEYS_SUFFIX) are merged into a single JSON map column.
   */
  private void handleTableFormatResultSet(
      ResultSetGroup resultSetGroup, List rowBuilderList) {
    int resultSetGroupCount = resultSetGroup.getResultSetCount();
    for (int i = 0; i < resultSetGroupCount; i++) {
      ResultSet resultSet = resultSetGroup.getResultSet(i);
      for (int rowIdx = 0; rowIdx < resultSet.getRowCount(); rowIdx++) {
        Builder builder;
        builder = Row.newBuilder();
        rowBuilderList.add(builder);

        for (int colIdx = 0; colIdx < resultSet.getColumnCount(); colIdx++) {
          if (resultSet.getColumnName(colIdx).endsWith(ViewDefinition.MAP_KEYS_SUFFIX)) {
            // Read the key and value column values. The columns should be side by side. That's how
            // the Pinot query
            // is structured
            String mapKeys = resultSet.getString(rowIdx, colIdx);
            String mapVals = resultSet.getString(rowIdx, colIdx + 1);
            try {
              builder.addColumn(
                  Value.newBuilder().setString(pinotMapConverter.merge(mapKeys, mapVals)).build());
            } catch (IOException ex) {
              LOG.error("An error occured while merging mapKeys and mapVals", ex);
              throw new RuntimeException(
                  "An error occurred while parsing the Pinot Table format response", ex);
            }
            // advance colIdx by 1 since we have read 2 columns
            colIdx++;
          } else {
            String val = resultSet.getString(rowIdx, colIdx);
            builder.addColumn(Value.newBuilder().setString(val).build());
          }
        }
      }
    }
  }

  /**
   * Fails fast when the context/tenant is missing, or when DISTINCT selections are combined with
   * group-bys or aggregations (unsupported combination).
   */
  private void validateQueryRequest(QueryContext queryContext, QueryRequest request) {
    // Validate QueryContext and tenant id presence
    Preconditions.checkNotNull(queryContext);
    Preconditions.checkNotNull(queryContext.getTenantId());

    // Validate DISTINCT selections
    if (request.getDistinctSelections()) {
      boolean noGroupBy = request.getGroupByCount() == 0;
      boolean noAggregations = request.getAggregationCount() == 0;
      Preconditions.checkArgument(
          noGroupBy && noAggregations,
          "If distinct selections are requested, there should be no groupBys or aggregations.");
    }
  }
}
package org.hypertrace.core.query.service.pinot;

import com.google.common.annotations.VisibleForTesting;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import org.apache.pinot.client.Connection;
import org.apache.pinot.client.ConnectionFactory;
import org.apache.pinot.client.PreparedStatement;
import org.apache.pinot.client.Request;
import org.apache.pinot.client.ResultSetGroup;

/*
 * Factory to create PinotClient based on given zookeeper path.
 */
public class PinotClientFactory {

  private static final org.slf4j.Logger LOG =
      org.slf4j.LoggerFactory.getLogger(PinotClientFactory.class);
  // Singleton instance
  private static final PinotClientFactory INSTANCE = new PinotClientFactory();

  // One client per Pinot cluster name.
  private final ConcurrentHashMap<String, PinotClient> clientMap = new ConcurrentHashMap<>();

  private PinotClientFactory() {}

  /**
   * Creates (or returns the existing) Pinot client for the given cluster.
   *
   * <p>computeIfAbsent is atomic on ConcurrentHashMap, which replaces the original hand-rolled
   * check-then-lock (double-checked locking) and still guarantees a single client per cluster.
   */
  public static PinotClient createPinotClient(String pinotCluster, String pathType, String path) {
    return get().clientMap.computeIfAbsent(pinotCluster, k -> new PinotClient(pathType, path));
  }

  public static PinotClientFactory get() {
    return INSTANCE;
  }

  public boolean containsClient(String clusterName) {
    return this.clientMap.containsKey(clusterName);
  }

  public PinotClient getPinotClient(String clusterName) {
    return this.clientMap.get(clusterName);
  }

  /** Thin wrapper over a Pinot {@link Connection} that executes SQL prepared statements. */
  public static class PinotClient {
    private static final String SQL_FORMAT = "sql";

    private final Connection connection;

    @VisibleForTesting
    public PinotClient(Connection connection) {
      this.connection = connection;
    }

    private PinotClient(String pathType, String path) {
      switch (pathType.toLowerCase()) {
        case "zk":
        case "zookeeper":
          LOG.info("Trying to create a Pinot client connected to Zookeeper: {}", path);
          this.connection = ConnectionFactory.fromZookeeper(path);
          break;
        case "broker":
          LOG.info("Trying to create a Pinot client with default brokerlist: {}", path);
          this.connection = ConnectionFactory.fromHostList(path);
          break;
        default:
          throw new RuntimeException("Unsupported Pinot Client scheme: " + pathType);
      }
    }

    /** Executes the statement synchronously with the given positional params. */
    public ResultSetGroup executeQuery(String statement, Params params) {
      PreparedStatement preparedStatement = buildPreparedStatement(statement, params);
      return preparedStatement.execute();
    }

    /** Executes the statement asynchronously with the given positional params. */
    public Future<ResultSetGroup> executeQueryAsync(String statement, Params params) {
      PreparedStatement preparedStatement = buildPreparedStatement(statement, params);
      return preparedStatement.executeAsync();
    }

    // Binds every typed param from Params onto the prepared statement by index.
    private PreparedStatement buildPreparedStatement(String statement, Params params) {
      Request request = new Request(SQL_FORMAT, statement);
      PreparedStatement preparedStatement = connection.prepareStatement(request);
      params.getStringParams().forEach(preparedStatement::setString);
      params.getIntegerParams().forEach(preparedStatement::setInt);
      params.getLongParams().forEach(preparedStatement::setLong);
      params.getDoubleParams().forEach(preparedStatement::setDouble);
      params.getFloatParams().forEach(preparedStatement::setFloat);
      return preparedStatement;
    }
  }
}
package org.hypertrace.core.query.service.pinot;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Merges the parallel key/value string-array columns that Pinot stores for a map attribute back
 * into a single JSON object string.
 */
public class PinotMapConverter {
  // This is how an empty list is represented in Pinot
  private static final String PINOT_EMPTY_LIST = "[\"\"]";
  private static final Logger LOG = LoggerFactory.getLogger(PinotMapConverter.class);
  private static final TypeReference<List<String>> listOfString = new TypeReference<>() {};

  private final ObjectMapper objectMapper;

  public PinotMapConverter() {
    this.objectMapper = new ObjectMapper();
  }

  /**
   * Merges a JSON array of keys and a JSON array of values into a JSON map string.
   *
   * <p>The keys drive the map: excess values are dropped and missing values map to null.
   *
   * @param keyData JSON array of keys, or Pinot's empty-list marker
   * @param valueData JSON array of values, or Pinot's empty-list marker
   * @return the merged map serialized as JSON; never null
   * @throws IOException if either side is null, unparseable, or the result cannot be serialized
   */
  String merge(String keyData, String valueData) throws IOException {
    Map<String, String> map = new HashMap<>();
    // default should not be null
    if (keyData == null || valueData == null) {
      // throw IOException so that it can be caught by the caller and provide additional
      // context
      throw new IOException("Key Data or Value Data of this map is null.");
    }

    List<String> keys;
    if (PINOT_EMPTY_LIST.equals(keyData)) {
      keys = new ArrayList<>();
    } else {
      try {
        keys = objectMapper.readValue(keyData, listOfString);
      } catch (IOException e) {
        LOG.error(
            "Failed to deserialize map's key to list of string object. Raw Json String: {}",
            keyData);
        throw e;
      }
    }

    List<String> values;
    if (PINOT_EMPTY_LIST.equals(valueData)) {
      values = new ArrayList<>();
    } else {
      try {
        values = objectMapper.readValue(valueData, listOfString);
      } catch (IOException e) {
        LOG.error(
            "Failed to deserialize map's value to list of string object. Raw Json String {}",
            valueData);
        throw e;
      }
    }

    if (keys.size() != values.size()) {
      // Fixed message: the original read "Data will be return based on the kyes" and ran straight
      // into "Keys Size" with no separator.
      LOG.warn(
          "Keys and values sizes do not match; data will be returned based on the keys. "
              + "Keys size: {}, values size: {}",
          keys.size(),
          values.size());
      // todo: make this debug once in production
      LOG.info("Keys: {}, \n Values:{}", keys, values);
    }

    // If the size does not match, the key is driving the map data. Any excessive values
    // will be dropped
    for (int idx = 0; idx < keys.size(); idx++) {
      if (idx < values.size()) {
        map.put(keys.get(idx), values.get(idx));
      } else {
        // to handle unbalanced size
        map.put(keys.get(idx), null);
      }
    }

    try {
      return objectMapper.writeValueAsString(map);
    } catch (JsonProcessingException e) {
      LOG.error("Unable to write the merged map as json. Raw Data: {}", map);
      throw e;
    }
  }
}
+ */ +public abstract class PinotQuery { + + private final String name; + private final PinotClient pinotClient; + + public PinotQuery(String name, PinotClient pinotClient) { + this.name = name; + this.pinotClient = pinotClient; + } + + public String getName() { + return this.name; + } + + abstract String getQuery(Map args); + + abstract T convertQueryResults(ResultSetGroup queryResults); + + public T execute() { + return execute(Collections.emptyMap()); + } + + public T execute(Map args) { + final ResultSetGroup queryResults = + this.pinotClient.executeQuery(getQuery(args), Params.newBuilder().build()); + return convertQueryResults(queryResults); + } + + @Override + public int hashCode() { + int hash = 7; + hash = 31 * hash + (name == null ? 0 : name.hashCode()); + hash = 31 * hash + (pinotClient == null ? 0 : pinotClient.hashCode()); + return hash; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (this.getClass() != o.getClass()) { + return false; + } + PinotQuery pq = (PinotQuery) o; + return (this.name.equals(pq.name) && this.pinotClient == pq.pinotClient); + } +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java new file mode 100644 index 00000000..62c91131 --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java @@ -0,0 +1,152 @@ +package org.hypertrace.core.query.service.pinot; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.RateLimiter; +import java.io.IOException; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import javax.annotation.Nonnull; +import org.apache.pinot.client.ResultSet; +import 
package org.hypertrace.core.query.service.pinot;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.RateLimiter;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import javax.annotation.Nonnull;
import org.apache.pinot.client.ResultSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Discovers the map attributes indexes from Pinot Result Set */
class PinotResultAnalyzer {
  private static final Logger LOG = LoggerFactory.getLogger(PinotResultAnalyzer.class);

  /* Stores the Map Attributes logical name to Physical Names */
  private final Map mapLogicalNameToKeyIndex;
  private final Map mapLogicalNameToValueIndex;

  /* Stores the Non-Map Attributes logical name to Physical Name index */
  private final Map logicalNameToPhysicalNameIndex;
  private final ResultSet resultSet;
  private final ViewDefinition viewDefinition;
  // Per-attribute rate limiters so repeated missing-value-index errors don't flood the logs.
  private final Map attributeLogRateLimitter;
  private final PinotMapConverter pinotMapConverter;

  // Prefer the static factory create(), which builds the three index maps from the ResultSet.
  PinotResultAnalyzer(
      ResultSet resultSet,
      LinkedHashSet selectedAttributes,
      ViewDefinition viewDefinition,
      Map mapLogicalNameToKeyIndex,
      Map mapLogicalNameToValueIndex,
      Map logicalNameToPhysicalNameIndex) {
    this.mapLogicalNameToKeyIndex = mapLogicalNameToKeyIndex;
    this.mapLogicalNameToValueIndex = mapLogicalNameToValueIndex;
    this.logicalNameToPhysicalNameIndex = logicalNameToPhysicalNameIndex;
    this.resultSet = resultSet;
    this.viewDefinition = viewDefinition;
    this.attributeLogRateLimitter = new HashMap<>();
    // 0.5 permits/sec: at most one log burst every two seconds per attribute.
    selectedAttributes.forEach(e -> attributeLogRateLimitter.put(e, RateLimiter.create(0.5)));
    this.pinotMapConverter = new PinotMapConverter();
  }

  /** For each selected attributes build the map of logical name to result index. */
  static PinotResultAnalyzer create(
      ResultSet resultSet,
      LinkedHashSet selectedAttributes,
      ViewDefinition viewDefinition) {
    Map mapLogicalNameToKeyIndex = new HashMap<>();
    Map mapLogicalNameToValueIndex = new HashMap<>();
    Map logicalNameToPhysicalNameIndex = new HashMap<>();

    for (String logicalName : selectedAttributes) {
      if (viewDefinition.isMap(logicalName)) {
        // Map attribute: locate both the backing key column and the value column by name.
        String keyPhysicalName = viewDefinition.getKeyColumnNameForMap(logicalName);
        String valuePhysicalName = viewDefinition.getValueColumnNameForMap(logicalName);
        for (int colIndex = 0; colIndex < resultSet.getColumnCount(); colIndex++) {
          String physName = resultSet.getColumnName(colIndex);
          if (physName.equalsIgnoreCase(keyPhysicalName)) {
            mapLogicalNameToKeyIndex.put(logicalName, colIndex);
          } else if (physName.equalsIgnoreCase(valuePhysicalName)) {
            mapLogicalNameToValueIndex.put(logicalName, colIndex);
          }
        }
      } else {
        // Non-map attribute: exactly one physical column is expected.
        List names = viewDefinition.getPhysicalColumnNames(logicalName);
        Preconditions.checkArgument(names.size() == 1);
        for (int colIndex = 0; colIndex < resultSet.getColumnCount(); colIndex++) {
          String physName = resultSet.getColumnName(colIndex);
          if (physName.equalsIgnoreCase(names.get(0))) {
            logicalNameToPhysicalNameIndex.put(logicalName, colIndex);
            break;
          }
        }
      }
    }
    LOG.info("Map LogicalName to Key Index: {} ", mapLogicalNameToKeyIndex);
    LOG.info("Map LogicalName to Value Index: {}", mapLogicalNameToValueIndex);
    LOG.info("Attributes to Index: {}", logicalNameToPhysicalNameIndex);
    return new PinotResultAnalyzer(
        resultSet,
        selectedAttributes,
        viewDefinition,
        mapLogicalNameToKeyIndex,
        mapLogicalNameToValueIndex,
        logicalNameToPhysicalNameIndex);
  }

  @VisibleForTesting
  Integer getMapKeyIndex(String logicalName) {
    return mapLogicalNameToKeyIndex.get(logicalName);
  }

  @VisibleForTesting
  Integer getMapValueIndex(String logicalName) {
    return mapLogicalNameToValueIndex.get(logicalName);
  }

  @VisibleForTesting
  Integer getPhysicalColumnIndex(String logicalName) {
    return logicalNameToPhysicalNameIndex.get(logicalName);
  }

  /**
   * Gets the data from Result Set Row, will never null
   *
   * <p>For a map attribute, reads the key and value columns and merges them into a JSON map
   * string; a missing value index degrades to an empty value side (logged, rate-limited) rather
   * than failing. For a non-map attribute, returns the raw column string.
   *
   * @throws IllegalStateException if index is missing for merging or there's an issue with the data
   *     format in Pinot
   * @return merged map data if in correct format. Will never return null
   */
  @Nonnull
  String getDataFromRow(int rowIndex, String logicalName) {

    String result;
    if (viewDefinition.isMap(logicalName)) {
      Integer keyIndex = getMapKeyIndex(logicalName);
      if (keyIndex == null) {
        LOG.info("Map LogicalName to Key Index: {} ", mapLogicalNameToKeyIndex);
        LOG.info("Attributes to Index: {}", logicalNameToPhysicalNameIndex);
        throw new IllegalStateException(
            "Unable to find the key index to attribute: " + logicalName);
      }
      String keyData = resultSet.getString(rowIndex, keyIndex);

      String valueData = "";
      Integer valueIndex = getMapValueIndex(logicalName);
      if (valueIndex == null) {
        // Missing value column is tolerated; log at most once per ~2s per attribute.
        if (attributeLogRateLimitter.get(logicalName).tryAcquire()) {
          LOG.error("Unable to find the map value column index for Attribute: {}.", logicalName);
          LOG.info("Map LogicalName to Value Index: {} ", mapLogicalNameToValueIndex);
          LOG.info("Attributes to Index: {}", logicalNameToPhysicalNameIndex);
        }
      } else {
        valueData = resultSet.getString(rowIndex, valueIndex);
      }
      try {
        result = pinotMapConverter.merge(keyData, valueData);
      } catch (IOException e) {
        throw new IllegalStateException(
            "Unable to merge the map data for attribute " + logicalName, e);
      }
    } else {
      Integer colIndex = getPhysicalColumnIndex(logicalName);
      result = resultSet.getString(rowIndex, colIndex);
    }
    return result;
  }
}
b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotUtils.java @@ -0,0 +1,10 @@ +package org.hypertrace.core.query.service.pinot; + +public class PinotUtils { + + public static String getZkPath(String zkBasePath, String pinotClusterName) { + return zkBasePath.endsWith("/") + ? zkBasePath + pinotClusterName + : zkBasePath + "/" + pinotClusterName; + } +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java new file mode 100644 index 00000000..6eeb7e9b --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java @@ -0,0 +1,365 @@ +package org.hypertrace.core.query.service.pinot; + +import static org.hypertrace.core.query.service.api.Expression.ValueCase.COLUMNIDENTIFIER; +import static org.hypertrace.core.query.service.api.Expression.ValueCase.LITERAL; + +import com.google.common.base.Joiner; +import java.util.AbstractMap.SimpleEntry; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map.Entry; +import org.hypertrace.core.query.service.QueryContext; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.Function; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.OrderByExpression; +import org.hypertrace.core.query.service.api.QueryRequest; +import org.hypertrace.core.query.service.api.SortOrder; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.api.ValueType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Converts {@link QueryRequest} to Pinot SQL query */ +class 
QueryRequestToPinotSQLConverter { + private static final Logger LOG = LoggerFactory.getLogger(QueryRequestToPinotSQLConverter.class); + + private static final String QUESTION_MARK = "?"; + private static final String REGEX_OPERATOR = "REGEXP_LIKE"; + private static final String MAP_VALUE = "mapValue"; + private static final int MAP_KEY_INDEX = 0; + private static final int MAP_VALUE_INDEX = 1; + + private ViewDefinition viewDefinition; + private Joiner joiner = Joiner.on(", ").skipNulls(); + + QueryRequestToPinotSQLConverter(ViewDefinition viewDefinition) { + this.viewDefinition = viewDefinition; + } + + Entry toSQL( + QueryContext queryContext, QueryRequest request, LinkedHashSet allSelections) { + Params.Builder paramsBuilder = Params.newBuilder(); + StringBuilder pqlBuilder = new StringBuilder("Select "); + String delim = ""; + + // Set the DISTINCT keyword if the request has set distinctSelections. + if (request.getDistinctSelections()) { + pqlBuilder.append("DISTINCT "); + } + + // allSelections contain all the various expressions in QueryRequest that we want selections on. + // Group bys, selections and aggregations in that order. See RequestAnalyzer#analyze() to see + // how it is created. 
+ for (Expression expr : allSelections) { + pqlBuilder.append(delim); + pqlBuilder.append(convertExpression2String(expr, paramsBuilder)); + delim = ", "; + } + + pqlBuilder.append(" FROM ").append(viewDefinition.getViewName()); + + // Add the tenantId filter + pqlBuilder.append(" WHERE ").append(viewDefinition.getTenantIdColumn()).append(" = ?"); + paramsBuilder.addStringParam(queryContext.getTenantId()); + + if (request.hasFilter()) { + pqlBuilder.append(" AND "); + String filterClause = convertFilter2String(request.getFilter(), paramsBuilder); + pqlBuilder.append(filterClause); + } + + if (request.getGroupByCount() > 0) { + pqlBuilder.append(" GROUP BY "); + delim = ""; + for (Expression groupByExpression : request.getGroupByList()) { + pqlBuilder.append(delim); + pqlBuilder.append(convertExpression2String(groupByExpression, paramsBuilder)); + delim = ", "; + } + } + if (!request.getOrderByList().isEmpty()) { + pqlBuilder.append(" ORDER BY "); + delim = ""; + for (OrderByExpression orderByExpression : request.getOrderByList()) { + pqlBuilder.append(delim); + String orderBy = convertExpression2String(orderByExpression.getExpression(), paramsBuilder); + pqlBuilder.append(orderBy); + if (SortOrder.DESC.equals(orderByExpression.getOrder())) { + pqlBuilder.append(" desc "); + } + delim = ", "; + } + } + if (request.getLimit() > 0) { + if (request.getOffset() > 0) { + pqlBuilder + .append(" limit ") + .append(request.getOffset()) + .append(", ") + .append(request.getLimit()); + } else { + pqlBuilder.append(" limit ").append(request.getLimit()); + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Converted QueryRequest to Pinot SQL: {}", pqlBuilder); + } + return new SimpleEntry<>(pqlBuilder.toString(), paramsBuilder.build()); + } + + private String convertFilter2String(Filter filter, Params.Builder paramsBuilder) { + StringBuilder builder = new StringBuilder(); + String operator = convertOperator2String(filter.getOperator()); + if (filter.getChildFilterCount() > 0) { 
+ String delim = ""; + builder.append("( "); + for (Filter childFilter : filter.getChildFilterList()) { + builder.append(delim); + builder.append(convertFilter2String(childFilter, paramsBuilder)); + builder.append(" "); + delim = operator + " "; + } + builder.append(")"); + } else { + switch (filter.getOperator()) { + case LIKE: + // The like operation in PQL looks like `regexp_like(lhs, rhs)` + builder.append(operator); + builder.append("("); + builder.append(convertExpression2String(filter.getLhs(), paramsBuilder)); + builder.append(","); + builder.append(convertExpression2String(filter.getRhs(), paramsBuilder)); + builder.append(")"); + break; + case CONTAINS_KEY: + LiteralConstant[] kvp = convertExpressionToMapLiterals(filter.getRhs()); + builder.append(convertExpressionToMapKeyColumn(filter.getLhs())); + builder.append(" = "); + builder.append(convertLiteralToString(kvp[MAP_KEY_INDEX], paramsBuilder)); + break; + case CONTAINS_KEYVALUE: + kvp = convertExpressionToMapLiterals(filter.getRhs()); + String keyCol = convertExpressionToMapKeyColumn(filter.getLhs()); + String valCol = convertExpressionToMapValueColumn(filter.getLhs()); + builder.append(keyCol); + builder.append(" = "); + builder.append(convertLiteralToString(kvp[MAP_KEY_INDEX], paramsBuilder)); + builder.append(" AND "); + builder.append(valCol); + builder.append(" = "); + builder.append(convertLiteralToString(kvp[MAP_VALUE_INDEX], paramsBuilder)); + builder.append(" AND "); + builder.append(MAP_VALUE); + builder.append("("); + builder.append(keyCol); + builder.append(","); + builder.append(convertLiteralToString(kvp[MAP_KEY_INDEX], paramsBuilder)); + builder.append(","); + builder.append(valCol); + builder.append(") = "); + builder.append(convertLiteralToString(kvp[MAP_VALUE_INDEX], paramsBuilder)); + break; + default: + builder.append(convertExpression2String(filter.getLhs(), paramsBuilder)); + builder.append(" "); + builder.append(operator); + builder.append(" "); + 
builder.append(convertExpression2String(filter.getRhs(), paramsBuilder)); + } + } + return builder.toString(); + } + + private String convertOperator2String(Operator operator) { + switch (operator) { + case AND: + return "AND"; + case OR: + return "OR"; + case NOT: + return "NOT"; + case EQ: + return "="; + case NEQ: + return "!="; + case IN: + return "IN"; + case NOT_IN: + return "NOT IN"; + case GT: + return ">"; + case LT: + return "<"; + case GE: + return ">="; + case LE: + return "<="; + case LIKE: + return REGEX_OPERATOR; + case CONTAINS_KEY: + case CONTAINS_KEYVALUE: + return MAP_VALUE; + case RANGE: + throw new UnsupportedOperationException("RANGE NOT supported use >= and <="); + case UNRECOGNIZED: + default: + throw new UnsupportedOperationException("Unknown operator:" + operator); + } + } + + private String convertExpression2String(Expression expression, Params.Builder paramsBuilder) { + switch (expression.getValueCase()) { + case COLUMNIDENTIFIER: + String logicalColumnName = expression.getColumnIdentifier().getColumnName(); + // this takes care of the Map Type where it's split into 2 columns + List columnNames = viewDefinition.getPhysicalColumnNames(logicalColumnName); + return joiner.join(columnNames); + case LITERAL: + return convertLiteralToString(expression.getLiteral(), paramsBuilder); + case FUNCTION: + Function function = expression.getFunction(); + String functionName = function.getFunctionName(); + // For COUNT(column_name), Pinot sql format converts it to COUNT(*) and even only works with + // COUNT(*) for ORDER BY + if (functionName.equalsIgnoreCase("COUNT")) { + return functionName + "(*)"; + } + List argumentsList = function.getArgumentsList(); + String[] args = new String[argumentsList.size()]; + for (int i = 0; i < argumentsList.size(); i++) { + Expression expr = argumentsList.get(i); + args[i] = convertExpression2String(expr, paramsBuilder); + } + return functionName + "(" + joiner.join(args) + ")"; + case ORDERBY: + OrderByExpression 
orderBy = expression.getOrderBy(); + return convertExpression2String(orderBy.getExpression(), paramsBuilder); + case VALUE_NOT_SET: + break; + } + return ""; + } + + private String convertExpressionToMapKeyColumn(Expression expression) { + if (expression.getValueCase() == COLUMNIDENTIFIER) { + String logicalColumnName = expression.getColumnIdentifier().getColumnName(); + String col = viewDefinition.getKeyColumnNameForMap(logicalColumnName); + if (col != null && col.length() > 0) { + return col; + } + } + throw new IllegalArgumentException( + "operator CONTAINS_KEY/KEYVALUE supports multi value column only"); + } + + private String convertExpressionToMapValueColumn(Expression expression) { + if (expression.getValueCase() == COLUMNIDENTIFIER) { + String logicalColumnName = expression.getColumnIdentifier().getColumnName(); + String col = viewDefinition.getValueColumnNameForMap(logicalColumnName); + if (col != null && col.length() > 0) { + return col; + } + } + throw new IllegalArgumentException( + "operator CONTAINS_KEY/KEYVALUE supports multi value column only"); + } + + private LiteralConstant[] convertExpressionToMapLiterals(Expression expression) { + LiteralConstant[] literals = new LiteralConstant[2]; + if (expression.getValueCase() == LITERAL) { + LiteralConstant value = expression.getLiteral(); + if (value.getValue().getValueType() == ValueType.STRING_ARRAY) { + for (int i = 0; i < 2 && i < value.getValue().getStringArrayCount(); i++) { + literals[i] = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder().setString(value.getValue().getStringArray(i)).build()) + .build(); + } + } else { + throw new IllegalArgumentException( + "operator CONTAINS_KEYVALUE supports " + + ValueType.STRING_ARRAY.name() + + " value type only"); + } + } + + for (int i = 0; i < literals.length; i++) { + if (literals[i] == null) { + literals[i] = + LiteralConstant.newBuilder().setValue(Value.newBuilder().setString("").build()).build(); + } + } + + return literals; + } + + /** 
TODO:Handle all types */ + private String convertLiteralToString(LiteralConstant literal, Params.Builder paramsBuilder) { + Value value = literal.getValue(); + String ret = null; + switch (value.getValueType()) { + case STRING_ARRAY: + StringBuilder builder = new StringBuilder("("); + String delim = ""; + for (String item : value.getStringArrayList()) { + builder.append(delim); + builder.append(QUESTION_MARK); + paramsBuilder.addStringParam(item); + delim = ", "; + } + builder.append(")"); + ret = builder.toString(); + break; + case LONG_ARRAY: + break; + case INT_ARRAY: + break; + case FLOAT_ARRAY: + break; + case DOUBLE_ARRAY: + break; + case BYTES_ARRAY: + break; + case BOOLEAN_ARRAY: + break; + case STRING: + ret = QUESTION_MARK; + paramsBuilder.addStringParam(value.getString()); + break; + case LONG: + ret = QUESTION_MARK; + paramsBuilder.addLongParam(value.getLong()); + break; + case INT: + ret = QUESTION_MARK; + paramsBuilder.addIntegerParam(value.getInt()); + break; + case FLOAT: + ret = QUESTION_MARK; + paramsBuilder.addFloatParam(value.getFloat()); + break; + case DOUBLE: + ret = QUESTION_MARK; + paramsBuilder.addDoubleParam(value.getDouble()); + break; + case BYTES: + break; + case BOOL: + ret = QUESTION_MARK; + paramsBuilder.addStringParam(String.valueOf(value.getBoolean())); + break; + case TIMESTAMP: + ret = QUESTION_MARK; + paramsBuilder.addLongParam(value.getTimestamp()); + break; + case UNRECOGNIZED: + break; + } + return ret; + } +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java new file mode 100644 index 00000000..5a8a4b21 --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java @@ -0,0 +1,21 @@ +package org.hypertrace.core.query.service.pinot; + +import org.apache.pinot.client.ResultSet; + 
+/** + * This interface is used to determine which handler will parse the Pinot ResultSet in + * PinotBasedRequestHandler#convert(). We define it to make it easy to unit test the parsing logic + * since: - The implementations of ResultSet are package private and there's no way to determine the + * concrete type of the ResultSet object other than using the class name. See + * DefaultResultSetTypePredicateProvider class. - The ResultSet interface itself is implemented non + * uniformly by its implementations. The defined methods in the interface do not return consistent + * data across the implementations and the format of the implementations is different. - However, + * since it seems like for "sql" format the ResultTableResultSet is being returned for all Pinot + * query types we might be able to get rid of this in the future and have a single flow to parse the + * Pinot Response. + */ +public interface ResultSetTypePredicateProvider { + boolean isSelectionResultSetType(ResultSet resultSet); + + boolean isResultTableResultSetType(ResultSet resultSet); +} diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java new file mode 100644 index 00000000..91b74a7c --- /dev/null +++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java @@ -0,0 +1,93 @@ +package org.hypertrace.core.query.service.pinot; + +import com.google.common.base.Preconditions; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.hypertrace.core.query.service.api.ValueType; + +public class ViewDefinition { + static final String MAP_KEYS_SUFFIX = "__KEYS"; + static final String MAP_VALUES_SUFFIX = "__VALUES"; + + private final String viewName; + private final Map columnSpecMap; + private final String 
tenantColumnName; + + public ViewDefinition( + String viewName, Map columnSpecMap, String tenantColumnName) { + this.viewName = viewName; + this.columnSpecMap = columnSpecMap; + this.tenantColumnName = tenantColumnName; + } + + public static ViewDefinition parse(Map config, String tenantColumnName) { + String viewName = (String) config.get("viewName"); + Map columnSpecMap = new HashMap<>(); + final Map fieldMap = (Map) config.get("fieldMap"); + // todo: refactor to use attr service + final List mapFieldsList = ((List) config.get("mapFields")); + Set mapFields = new HashSet<>(); + if (mapFieldsList != null) { + mapFields.addAll(mapFieldsList); + } + + for (String logicalName : fieldMap.keySet()) { + String physName = fieldMap.get(logicalName); + PinotColumnSpec spec = new PinotColumnSpec(); + // todo: replace this with call to attribute service + if (mapFields.contains(fieldMap.get(logicalName))) { + spec.setType(ValueType.STRING_MAP); + // split them to 2 automatically here + spec.addColumnName(physName + MAP_KEYS_SUFFIX); + spec.addColumnName(physName + MAP_VALUES_SUFFIX); + } else { + spec.addColumnName(physName); + spec.setType(ValueType.STRING); + } + columnSpecMap.put(logicalName, spec); + } + return new ViewDefinition(viewName, columnSpecMap, tenantColumnName); + } + + public String getViewName() { + return viewName; + } + + public String getTenantIdColumn() { + return tenantColumnName; + } + + public boolean containsColumn(String referencedColumn) { + return columnSpecMap.containsKey(referencedColumn); + } + + public List getPhysicalColumnNames(String logicalColumnName) { + return columnSpecMap.get(logicalColumnName).getColumnNames(); + } + + public boolean isMap(String logicalName) { + return (ValueType.STRING_MAP.equals(columnSpecMap.get(logicalName).getType())); + } + + public String getKeyColumnNameForMap(String logicalName) { + List keys = findPyhsicalNameWithSuffix(logicalName, MAP_KEYS_SUFFIX); + Preconditions.checkArgument(keys.size() <= 1); + return 
keys.isEmpty() ? null : keys.get(0); + } + + public String getValueColumnNameForMap(String logicalName) { + List keys = findPyhsicalNameWithSuffix(logicalName, MAP_VALUES_SUFFIX); + Preconditions.checkArgument(keys.size() <= 1); + return keys.isEmpty() ? null : keys.get(0); + } + + private List findPyhsicalNameWithSuffix(String logicalName, String suffix) { + return columnSpecMap.get(logicalName).getColumnNames().stream() + .filter(e -> e.toUpperCase().endsWith(suffix)) + .collect(Collectors.toList()); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java new file mode 100644 index 00000000..2b59c585 --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java @@ -0,0 +1,32 @@ +package org.hypertrace.core.query.service; + +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Function; +import org.hypertrace.core.query.service.api.OrderByExpression; +import org.hypertrace.core.query.service.api.SortOrder; + +public class QueryRequestBuilderUtils { + public static Expression.Builder createColumnExpression(String columnName) { + return Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName(columnName)); + } + + public static Expression.Builder createFunctionExpression( + String functionName, String columnNameArg, String alias) { + return Expression.newBuilder() + .setFunction( + Function.newBuilder() + .setAlias(alias) + .setFunctionName(functionName) + .addArguments( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName(columnNameArg)))); + } + + public static OrderByExpression.Builder createOrderByExpression( + Expression.Builder expression, SortOrder sortOrder) { 
+ return OrderByExpression.newBuilder().setExpression(expression).setOrder(sortOrder); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java new file mode 100644 index 00000000..cb617df2 --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java @@ -0,0 +1,166 @@ +package org.hypertrace.core.query.service; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; +import org.hypertrace.core.query.service.QueryServiceImplConfig.ClientConfig; +import org.hypertrace.core.query.service.QueryServiceImplConfig.RequestHandlerConfig; +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.QueryRequest; +import org.hypertrace.core.query.service.api.QueryRequest.Builder; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.pinot.PinotBasedRequestHandler; +import org.hypertrace.core.query.service.pinot.ViewDefinition; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class QueryServiceImplConfigTest { + + private static final Logger LOGGER = LoggerFactory.getLogger(QueryServiceImplConfigTest.class); + private Config appConfig; + private QueryServiceImplConfig queryServiceConfig; + + @BeforeEach + public void setup() { + appConfig = + 
ConfigFactory.parseFile( + new File( + QueryServiceImplConfigTest.class + .getClassLoader() + .getResource("application.conf") + .getPath())); + queryServiceConfig = QueryServiceImplConfig.parse(appConfig.getConfig("service.config")); + } + + @Test + public void testQueryServiceImplConfigParser() { + // Test QueryServiceImplConfig + assertEquals("query-service", appConfig.getString("service.name")); + assertEquals(8091, appConfig.getInt("service.admin.port")); + assertEquals(8090, appConfig.getInt("service.port")); + assertEquals(2, queryServiceConfig.getQueryRequestHandlersConfig().size()); + assertEquals(2, queryServiceConfig.getClients().size()); + assertEquals("tenant_id", queryServiceConfig.getTenantColumnName()); + + LOGGER.info("{}", queryServiceConfig.getQueryRequestHandlersConfig()); + + RequestHandlerConfig handler0 = + RequestHandlerConfig.parse(queryServiceConfig.getQueryRequestHandlersConfig().get(0)); + assertEquals("piontCluster0", handler0.getName()); + assertEquals("pinot", handler0.getType()); + Map requestHandlerInfo = handler0.getRequestHandlerInfo(); + LOGGER.info("{}", requestHandlerInfo); + + String tenantColumnName = "tenant_id"; + ViewDefinition viewDefinition = + ViewDefinition.parse((Map) requestHandlerInfo.get("viewDefinition"), tenantColumnName); + assertEquals("RawTraceView", viewDefinition.getViewName()); + assertEquals(tenantColumnName, viewDefinition.getTenantIdColumn()); + + Map clientConfigMap = + queryServiceConfig.getClients().stream() + .map(ClientConfig::parse) + .collect(Collectors.toMap(ClientConfig::getType, clientConfig -> clientConfig)); + ClientConfig clientConfig0 = clientConfigMap.get(handler0.getClientConfig()); + assertEquals("broker", clientConfig0.getType()); + assertEquals("pinotCluster0:8099", clientConfig0.getConnectionString()); + + RequestHandlerConfig handler1 = + RequestHandlerConfig.parse(queryServiceConfig.getQueryRequestHandlersConfig().get(1)); + assertEquals("span-event-view-handler", 
handler1.getName()); + assertEquals("pinot", handler1.getType()); + ClientConfig clientConfig1 = clientConfigMap.get(handler1.getClientConfig()); + assertEquals("zookeeper", clientConfig1.getType()); + assertEquals("pinotCluster1:2181", clientConfig1.getConnectionString()); + } + + @Test + public void testHandlerSelection() { + // Register all the handlers with the registry. + for (Config config : queryServiceConfig.getQueryRequestHandlersConfig()) { + RequestHandlerConfig handlerConfig = RequestHandlerConfig.parse(config); + Map requestHandlerInfoConf = new HashMap<>(); + + String tenantColumnName = "tenant_id"; + ViewDefinition viewDefinition = + ViewDefinition.parse( + (Map) + handlerConfig + .getRequestHandlerInfo() + .get(PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY), + tenantColumnName); + assertEquals(tenantColumnName, viewDefinition.getTenantIdColumn()); + requestHandlerInfoConf.put( + PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY, viewDefinition); + RequestHandlerRegistry.get() + .register( + handlerConfig.getName(), + new RequestHandlerInfo( + handlerConfig.getName(), PinotBasedRequestHandler.class, requestHandlerInfoConf)); + } + + RequestHandlerSelector selector = new RequestHandlerSelector(RequestHandlerRegistry.get()); + + QueryRequest queryRequest = buildSimpleQuery(); + RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest); + analyzer.analyze(); + RequestHandler handler = selector.select(queryRequest, analyzer); + assertEquals("span-event-view-handler", handler.getName()); + } + + private QueryRequest buildSimpleQuery() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier eventId = ColumnIdentifier.newBuilder().setColumnName("EVENT.id").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(eventId).build()); + + ColumnIdentifier eventType = ColumnIdentifier.newBuilder().setColumnName("EVENT.type").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(eventType).build()); 
+ + ColumnIdentifier displayName = + ColumnIdentifier.newBuilder().setColumnName("EVENT.displaySpanName").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(displayName).build()); + + ColumnIdentifier tags = ColumnIdentifier.newBuilder().setColumnName("EVENT.spanTags").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(tags).build()); + + Filter startTimeFilter = + createTimeFilter( + "EVENT.startTime", Operator.GT, System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + createTimeFilter("EVENT.endTime", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build(); + builder.setFilter(andFilter); + return builder.build(); + } + + private Filter createTimeFilter(String columnName, Operator op, long value) { + + ColumnIdentifier startTimeColumn = + ColumnIdentifier.newBuilder().setColumnName(columnName).build(); + Expression lhs = Expression.newBuilder().setColumnIdentifier(startTimeColumn).build(); + + LiteralConstant constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString(String.valueOf(value)).build()) + .build(); + Expression rhs = Expression.newBuilder().setLiteral(constant).build(); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build(); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java new file mode 100644 index 00000000..70ab59ea --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java @@ -0,0 +1,332 @@ +package org.hypertrace.core.query.service; + +import com.google.common.collect.Lists; +import io.grpc.Deadline; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import 
java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.Function; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.QueryRequest; +import org.hypertrace.core.query.service.api.QueryRequest.Builder; +import org.hypertrace.core.query.service.api.QueryServiceGrpc; +import org.hypertrace.core.query.service.api.QueryServiceGrpc.QueryServiceBlockingStub; +import org.hypertrace.core.query.service.api.ResultSetChunk; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.util.QueryRequestUtil; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class QueryServiceImplTest { + private static final Logger LOGGER = LoggerFactory.getLogger(QueryServiceImplTest.class); + + @Test + public void testQueryServiceImplInitialization() { + QueryServiceImplConfig queryServiceConfig = new QueryServiceImplConfig(); + queryServiceConfig.setTenantColumnName("tenant_id"); + queryServiceConfig.setClients(List.of()); + queryServiceConfig.setQueryRequestHandlersConfig(List.of()); + + QueryServiceImpl queryServiceImpl = new QueryServiceImpl(queryServiceConfig); + Assertions.assertNotNull(queryServiceImpl); + } + + @Test + public void testBlankTenantColumnNameThrowsException() { + // Empty tenant id column name + QueryServiceImplConfig queryServiceConfig = new QueryServiceImplConfig(); + queryServiceConfig.setTenantColumnName(""); + queryServiceConfig.setClients(List.of()); + + Assertions.assertThrows( + RuntimeException.class, + () -> new 
QueryServiceImpl(queryServiceConfig), + "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config."); + + // null tenant id column name + QueryServiceImplConfig queryServiceConfig1 = new QueryServiceImplConfig(); + queryServiceConfig1.setTenantColumnName(null); + queryServiceConfig1.setClients(List.of()); + + Assertions.assertThrows( + RuntimeException.class, + () -> new QueryServiceImpl(queryServiceConfig1), + "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config."); + + // whitespace tenant id column name + QueryServiceImplConfig queryServiceConfig2 = new QueryServiceImplConfig(); + queryServiceConfig2.setTenantColumnName(" "); + queryServiceConfig2.setClients(List.of()); + + Assertions.assertThrows( + RuntimeException.class, + () -> new QueryServiceImpl(queryServiceConfig2), + "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config."); + } + + // works with query service running at localhost + @Disabled + public void testGrpc() { + ManagedChannel managedChannel = + ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext().build(); + QueryServiceBlockingStub QueryServiceBlockingStub = + QueryServiceGrpc.newBlockingStub(managedChannel); + + ArrayList queryRequests = + Lists.newArrayList( + buildSimpleQuery(), + buildAggQuery(), + buildGroupByAggQuery(), + buildGroupByTimeRollupAggQuery()); + + for (QueryRequest queryRequest : queryRequests) { + LOGGER.info("Trying to send request {}", queryRequest); + Iterator resultSetChunkIterator = + QueryServiceBlockingStub.withDeadline(Deadline.after(15, TimeUnit.SECONDS)) + .execute(queryRequest); + LOGGER.info("Got response back: {}", resultSetChunkIterator); + while (resultSetChunkIterator.hasNext()) { + LOGGER.info("{}", resultSetChunkIterator.next()); + } + } + } + + @Disabled + public void testGrpcMap() { + ManagedChannel managedChannel = + 
ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext().build(); + QueryServiceBlockingStub QueryServiceBlockingStub = + QueryServiceGrpc.newBlockingStub(managedChannel); + + ArrayList queryRequests = Lists.newArrayList(buildSimpleMapQuery()); + + for (QueryRequest queryRequest : queryRequests) { + LOGGER.info("Trying to send request {}", queryRequest); + Iterator resultSetChunkIterator = + QueryServiceBlockingStub.withDeadline(Deadline.after(25, TimeUnit.SECONDS)) + .execute(queryRequest); + LOGGER.info("Got response back: {}", resultSetChunkIterator); + while (resultSetChunkIterator.hasNext()) { + LOGGER.info(" Result {}", resultSetChunkIterator.next()); + } + } + } + + private QueryRequest buildSimpleQuery() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("EVENT.id").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + + Filter startTimeFilter = + createTimeFilter( + "EVENT.start_time_millis", + Operator.GT, + System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build(); + builder.setFilter(andFilter); + + return builder.build(); + } + + private QueryRequest buildGroupByAggQuery() { + Builder builder = QueryRequest.newBuilder(); + builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("EVENT.id")); + + Filter startTimeFilter = + createTimeFilter( + "EVENT.start_time_millis", + Operator.GT, + System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + 
.addChildFilter(endTimeFilter) + .build(); + builder.setFilter(andFilter); + + builder.addGroupBy( + Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("EVENT.displaySpanName").build())); + return builder.build(); + } + + private QueryRequest buildGroupByTimeRollupAggQuery() { + Builder builder = QueryRequest.newBuilder(); + builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("EVENT.id")); + + Filter startTimeFilter = + createTimeFilter( + "EVENT.start_time_millis", + Operator.GT, + System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build(); + builder.setFilter(andFilter); + + Function groupByTimeUdf = + Function.newBuilder() + .setFunctionName("dateTimeConvert") + .addArguments( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder() + .setColumnName("EVENT.start_time_millis") + .build())) + .addArguments( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder().setString("1:MILLISECONDS:EPOCH").build()))) + .addArguments( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder().setString("1:MILLISECONDS:EPOCH").build()))) + .addArguments( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString("30:SECONDS").build()))) + .build(); + builder.addGroupBy(Expression.newBuilder().setFunction(groupByTimeUdf).build()); + return builder.build(); + } + + private QueryRequest buildSimpleMapQuery() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = + ColumnIdentifier.newBuilder().setColumnName("EVENT.id").setAlias("SpanIds").build(); + 
builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + builder.addSelection(createSelection("EVENT.end_time_millis")); + builder.addSelection(createSelection("EVENT.displaySpanName")); + builder.addSelection(createSelection("EVENT.attributes.request_body")); + builder.addSelection(createSelection("EVENT.attributes.protocol_name")); + builder.addSelection(createSelection("EVENT.attributes.request_headers")); + builder.addSelection(createSelection("EVENT.attributes.response_headers")); + + builder.addSelection(createSelection("EVENT.start_time_millis")); + builder.addSelection(createSelection("EVENT.metrics.duration_millis")); + builder.addSelection(createSelection("Service.name")); + builder.addSelection(createSelection("EVENT.attributes.response_body")); + builder.addSelection(createSelection("EVENT.attributes.parent_span_id")); + + Filter startTimeFilter = + createTimeFilter( + "EVENT.start_time_millis", + Operator.GT, + System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build(); + + builder.setFilter(andFilter); + + return builder.build(); + } + + private QueryRequest buildAggQuery() { + Builder builder = QueryRequest.newBuilder(); + Function maxStartTime = + Function.newBuilder() + .setFunctionName("MAX") + .addArguments( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder() + .setColumnName("EVENT.start_time_millis") + .build())) + .setAlias("MAX_start_time_millis") + .build(); + + builder.addSelection(createSelection("EVENT.attributes.request_headers")); + builder.addSelection(Expression.newBuilder().setFunction(maxStartTime).build()); + + Filter startTimeFilter = + createTimeFilter( + "EVENT.start_time_millis", + Operator.GT, + 
System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build(); + builder.setFilter(andFilter); + + return builder.build(); + } + + private Expression createSelection(String colName) { + return Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName(colName)) + .build(); + } + + private Filter createTimeFilter(String columnName, Operator op, long value) { + + ColumnIdentifier startTimeColumn = + ColumnIdentifier.newBuilder().setColumnName(columnName).build(); + Expression lhs = Expression.newBuilder().setColumnIdentifier(startTimeColumn).build(); + + // TODO: Why is this not LONG + LiteralConstant constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString(String.valueOf(value)).build()) + .build(); + Expression rhs = Expression.newBuilder().setLiteral(constant).build(); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build(); + } + + private Filter createStringColumnFilter(String columnName, Operator op, String value) { + ColumnIdentifier column = ColumnIdentifier.newBuilder().setColumnName(columnName).build(); + Expression lhs = Expression.newBuilder().setColumnIdentifier(column).build(); + + LiteralConstant constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString(String.valueOf(value)).build()) + .build(); + Expression rhs = Expression.newBuilder().setLiteral(constant).build(); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build(); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java new file mode 100644 index 
00000000..cfac7981 --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java @@ -0,0 +1,333 @@ +package org.hypertrace.core.query.service.pinot; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.io.IOException; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import org.apache.pinot.client.ResultSet; +import org.apache.pinot.client.ResultSetGroup; +import org.hypertrace.core.query.service.QueryContext; +import org.hypertrace.core.query.service.QueryRequestBuilderUtils; +import org.hypertrace.core.query.service.QueryResultCollector; +import org.hypertrace.core.query.service.RequestAnalyzer; +import org.hypertrace.core.query.service.api.QueryRequest; +import org.hypertrace.core.query.service.api.ResultSetChunk; +import org.hypertrace.core.query.service.api.Row; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class PinotBasedRequestHandlerTest { + // Test subject + private PinotBasedRequestHandler pinotBasedRequestHandler; + private final ObjectMapper objectMapper = new ObjectMapper(); + + @BeforeEach + public void setUp() { + // Mocks + PinotClientFactory pinotClientFactoryMock = mock(PinotClientFactory.class); + ResultSetTypePredicateProvider resultSetTypePredicateProviderMock = mock( + ResultSetTypePredicateProvider.class); + pinotBasedRequestHandler = + new PinotBasedRequestHandler(resultSetTypePredicateProviderMock, pinotClientFactoryMock); + + // Test ResultTableResultSet result set format parsing + when(resultSetTypePredicateProviderMock.isSelectionResultSetType(any(ResultSet.class))) + .thenReturn(false); + 
when(resultSetTypePredicateProviderMock.isResultTableResultSetType(any(ResultSet.class))) + .thenReturn(true); + } + + @Test + public void testConvertSimpleSelectionsQueryResultSet() throws IOException { + String[][] resultTable = + new String[][] { + {"operation-name-0", "service-name-0", "70", "80"}, + {"operation-name-1", "service-name-1", "71", "79"}, + {"operation-name-2", "service-name-2", "72", "78"}, + {"operation-name-3", "service-name-3", "73", "77"} + }; + List columnNames = List.of("operation_name", "service_name", "start_time_millis", "duration"); + ResultSet resultSet = mockResultSet(4, 4, columnNames, resultTable); + ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet)); + TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector(); + + pinotBasedRequestHandler.convert( + resultSetGroup, testQueryResultCollector, new LinkedHashSet<>()); + + verifyResponseRows(testQueryResultCollector, resultTable); + } + + @Test + public void testConvertAggregationColumnsQueryResultSet() throws IOException { + String[][] resultTable = + new String[][] { + {"operation-name-10", "110", "40", "21"}, + {"operation-name-11", "111", "41", "22"}, + {"operation-name-12", "112", "42", "23"}, + {"operation-name-13", "113", "43", "24"} + }; + List columnNames = List.of("operation_name", "avg(duration)", "count(*)", "max(duration)"); + ResultSet resultSet = mockResultSet(4, 4, columnNames, resultTable); + ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet)); + TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector(); + + pinotBasedRequestHandler.convert( + resultSetGroup, testQueryResultCollector, new LinkedHashSet<>()); + + verifyResponseRows(testQueryResultCollector, resultTable); + } + + @Test + public void testConvertSelectionsWithMapKeysAndValuesQueryResultSet() throws IOException { + String[][] resultTable = + new String[][] { + { + "operation-name-11", + stringify(List.of("t1", "t2")), + 
stringify(List.of("v1", "v2")), + "service-1", + stringify(List.of("t10")), + stringify(List.of("v10")) + }, + { + "operation-name-12", + stringify(List.of("a2")), + stringify(List.of("b2")), + "service-2", + stringify(List.of("c10", "c11")), + stringify(List.of("d10", "d11")) + }, + { + "operation-name-13", + stringify(List.of()), + stringify(List.of()), + "service-3", + stringify(List.of("e15")), + stringify(List.of("f15")) + } + }; + List columnNames = + List.of( + "operation_name", + "tags1" + ViewDefinition.MAP_KEYS_SUFFIX, + "tags1" + ViewDefinition.MAP_VALUES_SUFFIX, + "service_name", + "tags2" + ViewDefinition.MAP_KEYS_SUFFIX, + "tags2" + ViewDefinition.MAP_VALUES_SUFFIX); + ResultSet resultSet = mockResultSet(3, 6, columnNames, resultTable); + ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet)); + TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector(); + + pinotBasedRequestHandler.convert( + resultSetGroup, testQueryResultCollector, new LinkedHashSet<>()); + + String[][] expectedRows = + new String[][] { + { + "operation-name-11", + stringify(Map.of("t1", "v1", "t2", "v2")), + "service-1", + stringify(Map.of("t10", "v10")) + }, + { + "operation-name-12", + stringify(Map.of("a2", "b2")), + "service-2", + stringify(Map.of("c10", "d10", "c11", "d11")) + }, + {"operation-name-13", stringify(Map.of()), "service-3", stringify(Map.of("e15", "f15"))} + }; + + verifyResponseRows(testQueryResultCollector, expectedRows); + } + + @Test + public void testConvertMultipleResultSetsInFResultSetGroup() throws IOException { + List columnNames = List.of("operation_name", "avg(duration)", "count(*)", "max(duration)"); + String[][] resultTable1 = + new String[][] { + {"operation-name-10", "110", "40", "21"}, + {"operation-name-11", "111", "41", "22"}, + {"operation-name-12", "112", "42", "23"}, + {"operation-name-13", "113", "43", "24"} + }; + ResultSet resultSet1 = mockResultSet(4, 4, columnNames, resultTable1); + + String[][] 
resultTable2 = + new String[][] { + {"operation-name-20", "200", "400", "20000"}, + {"operation-name-22", "220", "420", "22000"} + }; + ResultSet resultSet2 = mockResultSet(2, 4, columnNames, resultTable2); + ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet1, resultSet2)); + TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector(); + + pinotBasedRequestHandler.convert( + resultSetGroup, testQueryResultCollector, new LinkedHashSet<>()); + + String[][] expectedRows = + new String[][] { + {"operation-name-10", "110", "40", "21"}, + {"operation-name-11", "111", "41", "22"}, + {"operation-name-12", "112", "42", "23"}, + {"operation-name-13", "113", "43", "24"}, + {"operation-name-20", "200", "400", "20000"}, + {"operation-name-22", "220", "420", "22000"} + }; + + verifyResponseRows(testQueryResultCollector, expectedRows); + } + + @Test + public void testNullQueryRequestContextThrowsNPE() { + Assertions.assertThrows( + NullPointerException.class, + () -> pinotBasedRequestHandler.handleRequest( + null, + QueryRequest.newBuilder().build(), + mock(QueryResultCollector.class), + mock(RequestAnalyzer.class))); + } + + @Test + public void testNullTenantIdQueryRequestContextThrowsNPE() { + Assertions.assertThrows( + NullPointerException.class, + () -> pinotBasedRequestHandler.handleRequest( + new QueryContext(null), + QueryRequest.newBuilder().build(), + mock(QueryResultCollector.class), + mock(RequestAnalyzer.class))); + } + + @Test + public void + testGroupBysAndAggregationsMixedWithSelectionsThrowsExeptionWhenDistinctSelectionIsSpecified() { + // Setting distinct selections and mixing selections and group bys should throw exception + Assertions.assertThrows( + IllegalArgumentException.class, + () -> pinotBasedRequestHandler.handleRequest( + new QueryContext("test-tenant-id"), + QueryRequest.newBuilder() + .setDistinctSelections(true) + .addSelection(QueryRequestBuilderUtils.createColumnExpression("col1")) + 
.addSelection(QueryRequestBuilderUtils.createColumnExpression("col2")) + .addGroupBy(QueryRequestBuilderUtils.createColumnExpression("col3")) + .build(), + mock(QueryResultCollector.class), + mock(RequestAnalyzer.class))); + + // Setting distinct selections and mixing selections and aggregations should throw exception + Assertions.assertThrows( + IllegalArgumentException.class, + () -> pinotBasedRequestHandler.handleRequest( + new QueryContext("test-tenant-id"), + QueryRequest.newBuilder() + .setDistinctSelections(true) + .addSelection(QueryRequestBuilderUtils.createColumnExpression("col1")) + .addSelection(QueryRequestBuilderUtils.createColumnExpression("col2")) + .addAggregation( + QueryRequestBuilderUtils.createFunctionExpression( + "AVG", "duration", "avg_duration")) + .build(), + mock(QueryResultCollector.class), + mock(RequestAnalyzer.class))); + + // Setting distinct selections and mixing selections, group bys and aggregations should throw + // exception + Assertions.assertThrows( + IllegalArgumentException.class, + () -> pinotBasedRequestHandler.handleRequest( + new QueryContext("test-tenant-id"), + QueryRequest.newBuilder() + .setDistinctSelections(true) + .addSelection(QueryRequestBuilderUtils.createColumnExpression("col1")) + .addSelection(QueryRequestBuilderUtils.createColumnExpression("col2")) + .addGroupBy(QueryRequestBuilderUtils.createColumnExpression("col3")) + .addAggregation( + QueryRequestBuilderUtils.createFunctionExpression( + "AVG", "duration", "avg_duration")) + .build(), + mock(QueryResultCollector.class), + mock(RequestAnalyzer.class))); + } + + private ResultSet mockResultSet( + int rowCount, int columnCount, List columnNames, String[][] resultsTable) { + ResultSet resultSet = mock(ResultSet.class); + when(resultSet.getRowCount()).thenReturn(rowCount); + when(resultSet.getColumnCount()).thenReturn(columnCount); + for (int colIdx = 0; colIdx < columnNames.size(); colIdx++) { + 
when(resultSet.getColumnName(colIdx)).thenReturn(columnNames.get(colIdx)); + } + + for (int rowIdx = 0; rowIdx < resultsTable.length; rowIdx++) { + for (int colIdx = 0; colIdx < resultsTable[0].length; colIdx++) { + when(resultSet.getString(rowIdx, colIdx)).thenReturn(resultsTable[rowIdx][colIdx]); + } + } + + return resultSet; + } + + private ResultSetGroup mockResultSetGroup(List resultSets) { + ResultSetGroup resultSetGroup = mock(ResultSetGroup.class); + + when(resultSetGroup.getResultSetCount()).thenReturn(resultSets.size()); + for (int i = 0; i < resultSets.size(); i++) { + when(resultSetGroup.getResultSet(i)).thenReturn(resultSets.get(i)); + } + + return resultSetGroup; + } + + private void verifyResponseRows( + TestQueryResultCollector testQueryResultCollector, String[][] expectedResultTable) + throws IOException { + List rows = testQueryResultCollector.getResultSetChunk().getRowList(); + Assertions.assertEquals(expectedResultTable.length, rows.size()); + for (int rowIdx = 0; rowIdx < rows.size(); rowIdx++) { + Row row = rows.get(rowIdx); + Assertions.assertEquals(expectedResultTable[rowIdx].length, row.getColumnCount()); + for (int colIdx = 0; colIdx < row.getColumnCount(); colIdx++) { + String val = row.getColumn(colIdx).getString(); + // In the scope of our unit tests, this is a map. Cannot do a JSON object comparison on it since + // it's not ordered. 
+ if (val.startsWith("{") && val.endsWith("}")) { + Assertions.assertEquals( + objectMapper.readTree(expectedResultTable[rowIdx][colIdx]), + objectMapper.readTree(val)); + } else { + Assertions.assertEquals(expectedResultTable[rowIdx][colIdx], val); + } + } + } + } + + private String stringify(Object obj) throws JsonProcessingException { + return objectMapper.writeValueAsString(obj); + } + + static class TestQueryResultCollector implements QueryResultCollector { + private final ResultSetChunk.Builder resultSetChunkBuilder = ResultSetChunk.newBuilder(); + + @Override + public void collect(Row row) { + resultSetChunkBuilder.addRow(row); + } + + @Override + public void finish() {} + + public ResultSetChunk getResultSetChunk() { + return resultSetChunkBuilder.build(); + } + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java new file mode 100644 index 00000000..995cb020 --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java @@ -0,0 +1,94 @@ +package org.hypertrace.core.query.service.pinot; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class PinotMapConverterTest { + private static final String KEY1 = "KEY1"; + private static final String KEY2 = "KEY2"; + private static final String VAL1 = "VAL1"; + private static final String VAL2 = "VAL2"; + + private List validKeys; + private List 
validValues; + private String validKeysJsonString; + private String validValueJsonString; + private String emptyMapJsonString; + private String expectedValidMapString; + private ObjectMapper objectMapper; + private Map expectedMap; + private PinotMapConverter target; + + @BeforeEach + public void setup() throws JsonProcessingException { + objectMapper = new ObjectMapper(); + emptyMapJsonString = objectMapper.writeValueAsString(Collections.emptyMap()); + + validKeys = Lists.newArrayList(KEY1, KEY2); + validValues = Lists.newArrayList(VAL1, VAL2); + validKeysJsonString = objectMapper.writeValueAsString(validKeys); + validValueJsonString = objectMapper.writeValueAsString(validValues); + + expectedMap = new HashMap<>(); + expectedMap.put(KEY1, VAL1); + expectedMap.put(KEY2, VAL2); + expectedValidMapString = objectMapper.writeValueAsString(expectedMap); + + target = new PinotMapConverter(); + } + + @Test + public void test_merge_nullKey_shouldThrowException() throws IOException { + assertThrows( + IOException.class, + () -> { + target.merge(null, ""); + }); + } + + @Test + public void test_merge_emptyListStringKey_shouldReturnEmptyMap() throws IOException { + assertEquals(emptyMapJsonString, target.merge("[]", "[]")); + } + + @Test + public void test_merge_PinotemptyList_shouldReturnEmptyMap() throws IOException { + assertEquals(emptyMapJsonString, target.merge("[\"\"]", "[\"\"]")); + } + + @Test + public void test_merge_validKeyEmptyStringValue_shouldReturnKeyWithEmptyValue() + throws IOException { + String expected = objectMapper.writeValueAsString(expectedMap); + assertEquals(expected, target.merge(validKeysJsonString, validValueJsonString)); + } + + @Test + public void test_merge_largerKeysThanValues_shouldReturnBasedOnKeys() throws IOException { + String newKey = "KEY3"; + expectedMap.put(newKey, null); + validKeys.add(newKey); + String largerKeysString = objectMapper.writeValueAsString(validKeys); + String expectedMapString = 
objectMapper.writeValueAsString(expectedMap); + assertEquals(expectedMapString, target.merge(largerKeysString, validValueJsonString)); + } + + @Test + public void test_merge_largerValuesThanKeys_shouldReturnBasedOnKeys() throws IOException { + String newValue = "VALUE3"; + validValues.add(newValue); + String largerValuesString = objectMapper.writeValueAsString(validValues); + assertEquals(expectedValidMapString, target.merge(validKeysJsonString, largerValuesString)); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java new file mode 100644 index 00000000..e537eb3a --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java @@ -0,0 +1,25 @@ +package org.hypertrace.core.query.service.pinot; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class PinotQueryTest { + + @Test + public void testPinotQuery() { + final AdhocPinotQuery q1 = new AdhocPinotQuery("query1", null); + q1.setQuery("q1"); + final AdhocPinotQuery q2 = new AdhocPinotQuery("query2", null); + q2.setQuery("q2"); + final AdhocPinotQuery q3 = new AdhocPinotQuery("query2", null); + q3.setQuery("q1"); + Assertions.assertFalse(q1.equals(q2)); + Assertions.assertFalse(q1.equals(q3)); + Assertions.assertFalse(q2.equals(q3)); + Assertions.assertNotEquals(q1, q2); + Assertions.assertNotEquals(q2, q3); + q3.setQuery("q2"); + Assertions.assertEquals(q2, q3); + Assertions.assertTrue(q2.equals(q3)); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java new file mode 100644 index 00000000..4107c5f8 --- /dev/null +++ 
b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java @@ -0,0 +1,156 @@ +package org.hypertrace.core.query.service.pinot; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import org.apache.pinot.client.ResultSet; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; + +public class PinotResultAnalyzerTest { + // Attribute 1 + private static final String LOGICAL_COL1 = "COL1"; + private static final String PHYS_COL1 = "PHYS_COL1"; + private static final String VAL_COL1 = "COL1_VAL"; + + // Attribute 2 + private static final String LOGICAL_COL2 = "COL2"; + private static final String PHYS_COL2 = "PHYS_COL2"; + private static final String VAL_COL2 = "COL2_VAL"; + + // Map Attribute 1 + private static final String LOGICAL_MAP_NAME1 = "MAP_COL1"; + private static final String MAP1_KEY_NAME = "MAP_COL1__KEYS"; + private static final String MAP1_VAL_NAME = "MAP_COL1__VALUES"; + private static final String MAP1_KEY_VAL = "[\"Content-Type\"]"; + private static final String MAP1_VAL_VAL = "[\"application/json\"]"; + + // Map Attribute 2 + private static final String LOGICAL_MAP_NAME2 = "MAP_COL2"; + private static final String MAP2_KEY_NAME = "MAP_COL2__KEYS"; + private static final String MAP2_VAL_NAME = "MAP_COL2__VALUES"; + private static final String MAP2_KEY_VAL = "[\"Amazing\"]"; + private static final String MAP2_VAL_VAL = "[\"@TestOrg\"]"; + + @Mock private ResultSet resultSet; + @Mock private ViewDefinition viewDefinition; + + private PinotResultAnalyzer target; + private LinkedHashSet selectedAttributes; + private Map> 
viewDefinitionMap; + private List resultSetColumnNames; + private List resultSetColumnValues; + private PinotMapConverter pinotMapConverter; + + @BeforeEach + public void setup() { + viewDefinition = mock(ViewDefinition.class); + resultSet = mock(ResultSet.class); + + pinotMapConverter = new PinotMapConverter(); + viewDefinitionMap = + ImmutableMap.>builder() + .put(LOGICAL_COL1, Lists.newArrayList(PHYS_COL1)) + .put(LOGICAL_COL2, Lists.newArrayList(PHYS_COL2)) + .put(LOGICAL_MAP_NAME1, Lists.newArrayList(MAP1_KEY_NAME, MAP1_VAL_NAME)) + .put(LOGICAL_MAP_NAME2, Lists.newArrayList(MAP2_KEY_NAME, MAP2_VAL_NAME)) + .build(); + viewDefinitionMap.forEach( + (k, v) -> { + when(viewDefinition.getPhysicalColumnNames(k)).thenReturn(v); + if (v.size() > 1) { + when(viewDefinition.isMap(k)).thenReturn(true); + } else { + when(viewDefinition.isMap(k)).thenReturn(false); + } + }); + when(viewDefinition.getKeyColumnNameForMap(LOGICAL_MAP_NAME1)).thenReturn(MAP1_KEY_NAME); + when(viewDefinition.getValueColumnNameForMap(LOGICAL_MAP_NAME1)).thenReturn(MAP1_VAL_NAME); + when(viewDefinition.getKeyColumnNameForMap(LOGICAL_MAP_NAME2)).thenReturn(MAP2_KEY_NAME); + when(viewDefinition.getValueColumnNameForMap(LOGICAL_MAP_NAME2)).thenReturn(MAP2_VAL_NAME); + + selectedAttributes = + new LinkedHashSet<>( + ImmutableList.builder() + .add(LOGICAL_COL1) + .add(LOGICAL_MAP_NAME1) + .add(LOGICAL_COL2) + .add(LOGICAL_MAP_NAME2) + .build()); + resultSetColumnNames = + Lists.newArrayList( + PHYS_COL1, MAP1_KEY_NAME, MAP1_VAL_NAME, PHYS_COL2, MAP2_KEY_NAME, MAP2_VAL_NAME); + + resultSetColumnValues = + Lists.newArrayList( + VAL_COL1, MAP1_KEY_VAL, MAP1_VAL_VAL, VAL_COL2, MAP2_KEY_VAL, MAP2_VAL_VAL); + + mockResultSet(resultSetColumnNames, resultSetColumnValues); + target = PinotResultAnalyzer.create(resultSet, selectedAttributes, viewDefinition); + } + + @Test + public void test_create_validInputWithMap_shouldFindIndexCorrectly() { + // assert index for non-map attributes + assertEquals( + 
findIndexInResultSet(resultSetColumnNames, PHYS_COL1), + target.getPhysicalColumnIndex(LOGICAL_COL1)); + assertEquals( + findIndexInResultSet(resultSetColumnNames, PHYS_COL2), + target.getPhysicalColumnIndex(LOGICAL_COL2)); + + // assert index for map attributes + assertEquals( + findIndexInResultSet(resultSetColumnNames, MAP1_KEY_NAME), + target.getMapKeyIndex(LOGICAL_MAP_NAME1)); + assertEquals( + findIndexInResultSet(resultSetColumnNames, MAP2_KEY_NAME), + target.getMapKeyIndex(LOGICAL_MAP_NAME2)); + + assertEquals( + findIndexInResultSet(resultSetColumnNames, MAP1_VAL_NAME), + target.getMapValueIndex(LOGICAL_MAP_NAME1)); + assertEquals( + findIndexInResultSet(resultSetColumnNames, MAP2_VAL_NAME), + target.getMapValueIndex(LOGICAL_MAP_NAME2)); + } + + @Test + public void test_getDataFromRow_validInputWithTwoMaps_ShouldGetData() throws IOException { + assertEquals(VAL_COL1, target.getDataFromRow(0, LOGICAL_COL1)); + assertEquals(VAL_COL2, target.getDataFromRow(0, LOGICAL_COL2)); + assertEquals( + pinotMapConverter.merge(MAP1_KEY_VAL, MAP1_VAL_VAL), + target.getDataFromRow(0, LOGICAL_MAP_NAME1)); + assertEquals( + pinotMapConverter.merge(MAP2_KEY_VAL, MAP2_VAL_VAL), + target.getDataFromRow(0, LOGICAL_MAP_NAME2)); + } + + private Integer findIndexInResultSet(List resultSetColumns, String name) { + for (int idx = 0; idx < resultSetColumns.size(); idx++) { + if (name.equalsIgnoreCase(resultSetColumns.get(idx))) { + return idx; + } + } + return null; + } + + private void mockResultSet( + List resultSetColumnNames, List resultSetColumnValues) { + when(resultSet.getColumnCount()).thenReturn(resultSetColumnNames.size()); + for (int idx = 0; idx < resultSetColumnNames.size(); idx++) { + when(resultSet.getColumnName(idx)).thenReturn(resultSetColumnNames.get(idx)); + when(resultSet.getString(0, idx)).thenReturn(resultSetColumnValues.get(idx)); + } + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java 
b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java new file mode 100644 index 00000000..773440cf --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java @@ -0,0 +1,19 @@ +package org.hypertrace.core.query.service.pinot; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class PinotUtilsTest { + + @Test + public void testZkPath() { + Assertions.assertEquals( + "localhost:2181/pinot", PinotUtils.getZkPath("localhost:2181", "pinot")); + Assertions.assertEquals( + "localhost:2181/pinot", PinotUtils.getZkPath("localhost:2181/", "pinot")); + Assertions.assertEquals( + "localhost:2181/pinot/myView", PinotUtils.getZkPath("localhost:2181/pinot", "myView")); + Assertions.assertEquals( + "localhost:2181/pinot/myView", PinotUtils.getZkPath("localhost:2181/pinot/", "myView")); + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java new file mode 100644 index 00000000..b1370273 --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java @@ -0,0 +1,830 @@ +package org.hypertrace.core.query.service.pinot; + +import static org.hypertrace.core.query.service.QueryRequestBuilderUtils.createColumnExpression; +import static org.hypertrace.core.query.service.QueryRequestBuilderUtils.createFunctionExpression; +import static org.hypertrace.core.query.service.QueryRequestBuilderUtils.createOrderByExpression; +import static org.mockito.ArgumentMatchers.any; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import 
java.util.Set; +import org.apache.pinot.client.Connection; +import org.apache.pinot.client.Request; +import org.hypertrace.core.query.service.QueryContext; +import org.hypertrace.core.query.service.RequestHandlerInfo; +import org.hypertrace.core.query.service.RequestHandlerRegistry; +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.Function; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.OrderByExpression; +import org.hypertrace.core.query.service.api.QueryRequest; +import org.hypertrace.core.query.service.api.QueryRequest.Builder; +import org.hypertrace.core.query.service.api.SortOrder; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.api.ValueType; +import org.hypertrace.core.query.service.pinot.PinotClientFactory.PinotClient; +import org.hypertrace.core.query.service.util.QueryRequestUtil; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +public class QueryRequestToPinotSQLConverterTest { + private static final String TENANT_ID = "__default"; + private static final String TENANT_COLUMN_NAME = "tenant_id"; + + private static ViewDefinition viewDefinition; + private static QueryContext queryContext; + private QueryRequestToPinotSQLConverter converter; + private Connection connection; + + @BeforeAll + public static void setUp() { + String handlerName = "pinotBasedRequestHandler"; + + Map config = new HashMap<>(); + Map columnSpecMap = new HashMap<>(); + Set mapFields = Set.of("tags", "request_headers"); + + Map> logicalToViewColumns = + ImmutableMap.>builder() 
+ .put("Span.tags", Lists.newArrayList("tags")) + .put("Span.id", Lists.newArrayList("span_id")) + .put("Span.duration_millis", Lists.newArrayList("duration_millis")) + .put("Span.start_time_millis", Lists.newArrayList("start_time_millis")) + .put("Span.end_time_millis", Lists.newArrayList("end_time_millis")) + .put("Span.displaySpanName", Lists.newArrayList("span_name")) + .put("Span.is_entry", Lists.newArrayList("is_entry")) + .put("Span.attributes.request_headers", Lists.newArrayList("request_headers")) + .put("Span.attributes.request_body", Lists.newArrayList("request_body")) + .put("Span.attributes.protocol_name", Lists.newArrayList("protocol_name")) + .put("Span.attributes.response_headers", Lists.newArrayList("response_headers")) + .put("Span.attributes.response_body", Lists.newArrayList("response_body")) + .put("Span.metrics.duration_millis", Lists.newArrayList("duration_millis")) + .put("Span.serviceName", Lists.newArrayList("service_name")) + .put("Span.attributes.parent_span_id", Lists.newArrayList("parent_span_id")) + .build(); + + for (String logicalName : logicalToViewColumns.keySet()) { + PinotColumnSpec spec = new PinotColumnSpec(); + for (String viewName : logicalToViewColumns.get(logicalName)) { + if (mapFields.contains(viewName)) { + spec.setType(ValueType.STRING_MAP); + spec.addColumnName(viewName + "__KEYS"); + spec.addColumnName(viewName + "__VALUES"); + } else { + spec.addColumnName(viewName); + spec.setType(ValueType.STRING); + } + } + columnSpecMap.put(logicalName, spec); + } + viewDefinition = new ViewDefinition("SpanEventView", columnSpecMap, TENANT_COLUMN_NAME); + config.put(PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY, viewDefinition); + RequestHandlerInfo requestHandlerInfo = + new RequestHandlerInfo(handlerName, PinotBasedRequestHandler.class, config); + RequestHandlerRegistry.get().register(handlerName, requestHandlerInfo); + + queryContext = new QueryContext(TENANT_ID); + } + + @BeforeEach + public void setup() { + converter 
= new QueryRequestToPinotSQLConverter(viewDefinition); + connection = Mockito.mock(Connection.class); + Mockito.when(connection.prepareStatement(any(Request.class))).thenCallRealMethod(); + } + + @Test + public void testQuery() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + + ColumnIdentifier tags = ColumnIdentifier.newBuilder().setColumnName("Span.tags").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(tags).build()); + + ColumnIdentifier request_headers = + ColumnIdentifier.newBuilder().setColumnName("Span.attributes.request_headers").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(request_headers).build()); + + Filter startTimeFilter = + createTimeFilter("Span.start_time_millis", Operator.GT, 1557780911508L); + Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1557780938419L); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build(); + builder.setFilter(andFilter); + + assertPQLQuery( + builder.build(), + "select span_id, tags__keys, tags__values, request_headers__keys, request_headers__values " + + "from SpanEventView " + + "where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "and ( start_time_millis > '1557780911508' and end_time_millis < '1557780938419' )"); + } + + @Test + public void testQueryWithoutFilter() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + assertPQLQuery( + builder.build(), + "Select span_id FROM SpanEventView " + + "where " + + viewDefinition.getTenantIdColumn() + + " = '" + + 
TENANT_ID + + "'"); + } + + @Test + public void testQuerySingleDistinctSelection() { + Builder builder = QueryRequest.newBuilder(); + builder.setDistinctSelections(true).addSelection(createColumnExpression("Span.id")); + assertPQLQuery( + builder.build(), + "Select distinct span_id FROM SpanEventView " + + "where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "'"); + } + + @Test + public void testQueryMultipleDistinctSelection() { + Builder builder = QueryRequest.newBuilder(); + builder + .setDistinctSelections(true) + .addSelection(createColumnExpression("Span.id")) + .addSelection(createColumnExpression("Span.displaySpanName")) + .addSelection(createColumnExpression("Span.serviceName")); + assertPQLQuery( + builder.build(), + "Select distinct span_id, span_name, service_name FROM SpanEventView " + + "where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "'"); + } + + @Test + public void testQueryWithStringFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter(createStringFilter("Span.displaySpanName", Operator.EQ, "GET /login")); + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView " + + "WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND span_name = 'GET /login'"); + } + + @Test + public void testSQLiWithStringValueFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter( + createStringFilter("Span.displaySpanName", Operator.EQ, "GET /login' OR tenant_id = 'tenant2")); + + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND span_name = 'GET /login'' OR tenant_id = ''tenant2'"); + } + + @Test + public void testQueryWithBooleanFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter(createBooleanFilter("Span.is_entry", Operator.EQ, true)); + + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView WHERE " 
+ + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND is_entry = 'true'"); + } + + @Test + public void testQueryWithDoubleFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter( + createDoubleFilter("Span.metrics.duration_millis", Operator.EQ, 1.2)); + + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND duration_millis = 1.2"); + } + + @Test + public void testQueryWithFloatFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter( + createFloatFilter("Span.metrics.duration_millis", Operator.EQ, 1.2f)); + + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND duration_millis = 1.2"); + } + + @Test + public void testQueryWithIntFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter(createIntFilter("Span.metrics.duration_millis", Operator.EQ, 1)); + + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND duration_millis = 1"); + } + + @Test + public void testQueryWithTimestampFilter() { + QueryRequest queryRequest = + buildSimpleQueryWithFilter(createTimestampFilter("Span.is_entry", Operator.EQ, 123456)); + + assertPQLQuery( + queryRequest, + "Select span_id FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND is_entry = 123456"); + } + + @Test + public void testQueryWithOrderBy() { + assertPQLQuery( + buildOrderByQuery(), + "Select span_id, start_time_millis, end_time_millis FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "order by start_time_millis desc , end_time_millis limit 100"); + } + + @Test + public void testQueryWithOrderByWithPagination() { + QueryRequest 
orderByQueryRequest = buildOrderByQuery(); + Builder builder = QueryRequest.newBuilder(orderByQueryRequest); + builder.setOffset(1000); + assertPQLQuery( + builder.build(), + "Select span_id, start_time_millis, end_time_millis FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "order by start_time_millis desc , end_time_millis limit 1000, 100"); + } + + @Test + public void testQueryWithGroupByWithMultipleAggregates() { + QueryRequest orderByQueryRequest = buildMultipleGroupByMultipleAggQuery(); + Builder builder = QueryRequest.newBuilder(orderByQueryRequest); + builder.setLimit(20); + assertPQLQuery( + builder.build(), + "select service_name, span_name, count(*), avg(duration_millis) from SpanEventView" + + " where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )" + + " group by service_name, span_name limit 20"); + } + + @Test + public void testQueryWithGroupByWithMultipleAggregatesAndOrderBy() { + QueryRequest orderByQueryRequest = buildMultipleGroupByMultipleAggAndOrderByQuery(); + Builder builder = QueryRequest.newBuilder(orderByQueryRequest); + builder.setLimit(20); + assertPQLQuery( + builder.build(), + "select service_name, span_name, count(*), avg(duration_millis) from SpanEventView" + + " where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )" + + " group by service_name, span_name order by service_name, avg(duration_millis) desc , count(*) desc limit 20"); + } + + @Test + public void testQueryWithDistinctCountAggregation() { + Filter startTimeFilter = + createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L); + Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L); + QueryRequest queryRequest = + QueryRequest.newBuilder() + 
.addAggregation( + createFunctionExpression("DISTINCTCOUNT", "Span.id", "distinctcount_span_id")) + .setFilter( + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build()) + .setLimit(15) + .build(); + + assertPQLQuery( + queryRequest, + "select distinctcount(span_id) from SpanEventView" + + " where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )" + + " limit 15"); + } + + @Test + public void testQueryWithDistinctCountAggregationAndGroupBy() { + Filter startTimeFilter = + createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L); + Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L); + QueryRequest queryRequest = + QueryRequest.newBuilder() + .addSelection(createColumnExpression("Span.id")) + .addGroupBy(createColumnExpression("Span.id")) + .addAggregation( + createFunctionExpression("DISTINCTCOUNT", "Span.id", "distinctcount_span_id")) + .setFilter( + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .build()) + .addOrderBy( + createOrderByExpression( + createFunctionExpression("DISTINCTCOUNT", "Span.id", "distinctcount_span_id"), + SortOrder.ASC)) + .setLimit(15) + .build(); + + assertPQLQuery( + queryRequest, + "select span_id, distinctcount(span_id) from SpanEventView" + + " where " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )" + + " group by span_id order by distinctcount(span_id) limit 15"); + } + + @Test + public void testQueryWithStringArray() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build(); + 
builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + + String trace1 = "1"; + String trace2 = "2"; + LiteralConstant spanIds = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder() + .setValueType(ValueType.STRING_ARRAY) + .addStringArray(trace1) + .addStringArray(trace2) + .build()) + .build(); + + Filter filter = + Filter.newBuilder() + .setOperator(Operator.IN) + .setLhs(Expression.newBuilder().setColumnIdentifier(spanId).build()) + .setRhs(Expression.newBuilder().setLiteral(spanIds).build()) + .build(); + + builder.setFilter(filter); + + assertPQLQuery( + builder.build(), + "SELECT span_id FROM SpanEventView " + + "WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND span_id IN ('1', '2')"); + } + + @Test + public void testSQLiWithStringArrayFilter() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + + String span1 = "1') OR tenant_id = 'tenant2' and span_id IN ('1"; + LiteralConstant spanIds = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder() + .setValueType(ValueType.STRING_ARRAY) + .addStringArray(span1) + .build()) + .build(); + + Filter filter = + Filter.newBuilder() + .setOperator(Operator.IN) + .setLhs(Expression.newBuilder().setColumnIdentifier(spanId).build()) + .setRhs(Expression.newBuilder().setLiteral(spanIds).build()) + .build(); + + builder.setFilter(filter); + assertPQLQuery( + builder.build(), + "SELECT span_id FROM SpanEventView WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND span_id IN ('1'') OR tenant_id = ''tenant2'' and span_id IN (''1')"); + } + + @Test + public void testQueryWithLikeOperator() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanId = 
ColumnIdentifier.newBuilder().setColumnName("Span.id").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build()); + + Filter likeFilter = + Filter.newBuilder() + .setOperator(Operator.LIKE) + .setLhs(Expression.newBuilder().setColumnIdentifier(spanId).build()) + .setRhs( + Expression.newBuilder() + .setLiteral( + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString("123").build())) + .build()) + .build(); + + builder.setFilter(likeFilter); + assertPQLQuery( + builder.build(), + "SELECT span_id FROM SpanEventView " + + "WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND REGEXP_LIKE(span_id,'123')"); + } + + @Test + public void testQueryWithContainsKeyOperator() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanTag = ColumnIdentifier.newBuilder().setColumnName("Span.tags").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanTag).build()); + + LiteralConstant tag = + LiteralConstant.newBuilder() + .setValue( + Value.newBuilder() + .setValueType(ValueType.STRING_ARRAY) + .addStringArray("FLAGS") + .addStringArray("0") + .build()) + .build(); + + Filter likeFilter = + Filter.newBuilder() + .setOperator(Operator.CONTAINS_KEY) + .setLhs(Expression.newBuilder().setColumnIdentifier(spanTag).build()) + .setRhs(Expression.newBuilder().setLiteral(tag).build()) + .build(); + + builder.setFilter(likeFilter); + assertPQLQuery( + builder.build(), + "SELECT tags__keys, tags__values FROM SpanEventView " + + "WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND tags__keys = 'flags'"); + } + + @Test + public void testQueryWithContainsKeyValueOperator() { + Builder builder = QueryRequest.newBuilder(); + ColumnIdentifier spanTag = ColumnIdentifier.newBuilder().setColumnName("Span.tags").build(); + builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanTag).build()); + + LiteralConstant tag = + 
LiteralConstant.newBuilder() + .setValue( + Value.newBuilder() + .setValueType(ValueType.STRING_ARRAY) + .addStringArray("FLAGS") + .addStringArray("0") + .build()) + .build(); + + Filter likeFilter = + Filter.newBuilder() + .setOperator(Operator.CONTAINS_KEYVALUE) + .setLhs(Expression.newBuilder().setColumnIdentifier(spanTag).build()) + .setRhs(Expression.newBuilder().setLiteral(tag).build()) + .build(); + + builder.setFilter(likeFilter); + assertPQLQuery( + builder.build(), + "SELECT tags__keys, tags__values FROM SpanEventView " + + "WHERE " + + viewDefinition.getTenantIdColumn() + + " = '" + + TENANT_ID + + "' " + + "AND tags__keys = 'flags' and tags__values = '0' and mapvalue(tags__keys,'flags',tags__values) = '0'"); + } + + private Filter createTimeFilter(String columnName, Operator op, long value) { + ColumnIdentifier startTimeColumn = + ColumnIdentifier.newBuilder().setColumnName(columnName).build(); + Expression lhs = Expression.newBuilder().setColumnIdentifier(startTimeColumn).build(); + + LiteralConstant constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setString(String.valueOf(value)).build()) + .build(); + Expression rhs = Expression.newBuilder().setLiteral(constant).build(); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build(); + } + + private Filter createStringFilter(String columnName, Operator op, String value) { + ColumnIdentifier booleanColumn = + ColumnIdentifier.newBuilder().setColumnName(columnName).build(); + Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build(); + + LiteralConstant constant = + LiteralConstant.newBuilder() + .setValue(Value.newBuilder().setValueType(ValueType.STRING).setString(value).build()) + .build(); + Expression rhs = Expression.newBuilder().setLiteral(constant).build(); + return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build(); + } + + private Filter createBooleanFilter(String columnName, Operator op, boolean value) { + 
    // (continuation of createBooleanFilter: builds a BOOL-typed literal RHS)
    ColumnIdentifier booleanColumn =
        ColumnIdentifier.newBuilder().setColumnName(columnName).build();
    Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();

    LiteralConstant constant =
        LiteralConstant.newBuilder()
            .setValue(Value.newBuilder().setValueType(ValueType.BOOL).setBoolean(value).build())
            .build();
    Expression rhs = Expression.newBuilder().setLiteral(constant).build();
    return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
  }

  /** Builds a {@code columnName op value} filter whose literal carries a TIMESTAMP-typed value. */
  private Filter createTimestampFilter(String columnName, Operator op, long value) {
    ColumnIdentifier booleanColumn =
        ColumnIdentifier.newBuilder().setColumnName(columnName).build();
    Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();

    LiteralConstant constant =
        LiteralConstant.newBuilder()
            .setValue(
                Value.newBuilder().setValueType(ValueType.TIMESTAMP).setTimestamp(value).build())
            .build();
    Expression rhs = Expression.newBuilder().setLiteral(constant).build();
    return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
  }

  /** Builds a {@code columnName op value} filter whose literal carries a DOUBLE-typed value. */
  private Filter createDoubleFilter(String columnName, Operator op, double value) {
    ColumnIdentifier booleanColumn =
        ColumnIdentifier.newBuilder().setColumnName(columnName).build();
    Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();

    LiteralConstant constant =
        LiteralConstant.newBuilder()
            .setValue(Value.newBuilder().setValueType(ValueType.DOUBLE).setDouble(value).build())
            .build();
    Expression rhs = Expression.newBuilder().setLiteral(constant).build();
    return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
  }

  /** Builds a {@code columnName op value} filter whose literal carries a FLOAT-typed value. */
  private Filter createFloatFilter(String columnName, Operator op, float value) {
    ColumnIdentifier booleanColumn =
        ColumnIdentifier.newBuilder().setColumnName(columnName).build();
    Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();

    LiteralConstant constant =
        LiteralConstant.newBuilder()
            .setValue(Value.newBuilder().setValueType(ValueType.FLOAT).setFloat(value).build())
            .build();
    Expression rhs = Expression.newBuilder().setLiteral(constant).build();
    return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
  }

  /** Builds a {@code columnName op value} filter whose literal carries an INT-typed value. */
  private Filter createIntFilter(String columnName, Operator op, int value) {
    ColumnIdentifier booleanColumn =
        ColumnIdentifier.newBuilder().setColumnName(columnName).build();
    Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();

    LiteralConstant constant =
        LiteralConstant.newBuilder()
            .setValue(Value.newBuilder().setValueType(ValueType.INT).setInt(value).build())
            .build();
    Expression rhs = Expression.newBuilder().setLiteral(constant).build();
    return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
  }

  /**
   * Builds a request selecting Span.id plus the start/end time columns, ordered by start time
   * descending then end time ascending, with a limit of 100 rows.
   */
  private QueryRequest buildOrderByQuery() {
    Builder builder = QueryRequest.newBuilder();
    ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
    ColumnIdentifier startTimeColumn =
        ColumnIdentifier.newBuilder().setColumnName("Span.start_time_millis").build();
    ColumnIdentifier endTimeColumn =
        ColumnIdentifier.newBuilder().setColumnName("Span.end_time_millis").build();

    builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
    builder.addSelection(Expression.newBuilder().setColumnIdentifier(startTimeColumn).build());
    builder.addSelection(Expression.newBuilder().setColumnIdentifier(endTimeColumn).build());

    builder.addOrderBy(
        OrderByExpression.newBuilder()
            .setExpression(Expression.newBuilder().setColumnIdentifier(startTimeColumn).build())
            .setOrder(SortOrder.DESC)
            .build());
    builder.addOrderBy(
        OrderByExpression.newBuilder()
            .setExpression(Expression.newBuilder().setColumnIdentifier(endTimeColumn).build())
            .setOrder(SortOrder.ASC)
            .build());

    builder.setLimit(100);
    return builder.build();
  }

  private
  QueryRequest buildMultipleGroupByMultipleAggQuery() {
    // Aggregations: COUNT(Span.id) and AVG(Span.duration_millis), grouped by
    // service name and span name, within a fixed start/end time window.
    Builder builder = QueryRequest.newBuilder();
    builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("Span.id"));
    Function.Builder avg =
        Function.newBuilder()
            .setFunctionName("AVG")
            .addArguments(
                Expression.newBuilder()
                    .setColumnIdentifier(
                        ColumnIdentifier.newBuilder().setColumnName("Span.duration_millis")));
    builder.addAggregation(Expression.newBuilder().setFunction(avg));

    Filter startTimeFilter =
        createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L);
    Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L);

    Filter andFilter =
        Filter.newBuilder()
            .setOperator(Operator.AND)
            .addChildFilter(startTimeFilter)
            .addChildFilter(endTimeFilter)
            .build();
    builder.setFilter(andFilter);

    builder.addGroupBy(
        Expression.newBuilder()
            .setColumnIdentifier(
                ColumnIdentifier.newBuilder().setColumnName("Span.serviceName").build()));
    builder.addGroupBy(
        Expression.newBuilder()
            .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Span.displaySpanName").build()));
    return builder.build();
  }

  /**
   * Same shape as buildMultipleGroupByMultipleAggQuery() plus three order-by expressions:
   * service name ASC, AVG(duration) DESC and COUNT DESC.
   */
  private QueryRequest buildMultipleGroupByMultipleAggAndOrderByQuery() {
    Builder builder = QueryRequest.newBuilder();
    builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("Span.id"));
    Function.Builder avg =
        Function.newBuilder()
            .setFunctionName("AVG")
            .addArguments(
                Expression.newBuilder()
                    .setColumnIdentifier(
                        ColumnIdentifier.newBuilder().setColumnName("Span.duration_millis")));
    builder.addAggregation(Expression.newBuilder().setFunction(avg));

    Filter startTimeFilter =
        createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L);
    Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L);

    Filter andFilter =
        Filter.newBuilder()
            .setOperator(Operator.AND)
            .addChildFilter(startTimeFilter)
            .addChildFilter(endTimeFilter)
            .build();
    builder.setFilter(andFilter);

    builder.addGroupBy(
        Expression.newBuilder()
            .setColumnIdentifier(
                ColumnIdentifier.newBuilder().setColumnName("Span.serviceName").build()));
    builder.addGroupBy(
        Expression.newBuilder()
            .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Span.displaySpanName").build()));

    builder.addOrderBy(
        createOrderByExpression(createColumnExpression("Span.serviceName"), SortOrder.ASC));
    builder.addOrderBy(
        createOrderByExpression(
            createFunctionExpression("AVG", "Span.duration_millis", "avg_duration_millis"),
            SortOrder.DESC));
    builder.addOrderBy(
        createOrderByExpression(
            createFunctionExpression("COUNT", "Span.id", "count_span_id"), SortOrder.DESC));
    return builder.build();
  }

  /** Wraps the given filter around a request that selects only the Span.id column. */
  private QueryRequest buildSimpleQueryWithFilter(Filter filter) {
    Builder builder = QueryRequest.newBuilder();
    ColumnIdentifier columnName = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
    builder.addSelection(Expression.newBuilder().setColumnIdentifier(columnName).build());

    builder.setFilter(filter);

    return builder.build();
  }

  /**
   * Converts the request to PQL, runs it through a PinotClient backed by the mocked Connection,
   * and compares the SQL captured from the connection against the expectation, ignoring case.
   */
  private void assertPQLQuery(QueryRequest queryRequest, String expectedQuery) {
    QueryRequestToPinotSQLConverter converter = new QueryRequestToPinotSQLConverter(viewDefinition);
    // NOTE(review): Entry and ArgumentCaptor are used raw here; presumably
    // Entry<String, Params> — confirm against QueryRequestToPinotSQLConverter.toSQL.
    Entry statementToParam =
        converter.toSQL(queryContext, queryRequest, createSelectionsFromQueryRequest(queryRequest));
    PinotClient pinotClient = new PinotClient(connection);
    pinotClient.executeQuery(statementToParam.getKey(), statementToParam.getValue());
    ArgumentCaptor statementCaptor = ArgumentCaptor.forClass(Request.class);
    Mockito.verify(connection, Mockito.times(1)).execute(statementCaptor.capture());
    Assertions.assertEquals(
        expectedQuery.toLowerCase(), statementCaptor.getValue().getQuery().toLowerCase());
  }

  // This method will put the selections in a LinkedHashSet in the order that RequestAnalyzer does:
  // group
bys, + // selections then aggregations. + private LinkedHashSet createSelectionsFromQueryRequest(QueryRequest queryRequest) { + LinkedHashSet selections = new LinkedHashSet<>(); + + selections.addAll(queryRequest.getGroupByList()); + selections.addAll(queryRequest.getSelectionList()); + selections.addAll(queryRequest.getAggregationList()); + + return selections; + } +} diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java new file mode 100644 index 00000000..0b75ea7d --- /dev/null +++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java @@ -0,0 +1,315 @@ +package org.hypertrace.core.query.service.pinot; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import com.google.common.collect.ImmutableSet; +import java.util.Iterator; +import java.util.Set; +import org.hypertrace.core.query.service.RequestAnalyzer; +import org.hypertrace.core.query.service.api.ColumnIdentifier; +import org.hypertrace.core.query.service.api.Expression; +import org.hypertrace.core.query.service.api.Filter; +import org.hypertrace.core.query.service.api.Function; +import org.hypertrace.core.query.service.api.LiteralConstant; +import org.hypertrace.core.query.service.api.Operator; +import org.hypertrace.core.query.service.api.QueryRequest; +import org.hypertrace.core.query.service.api.QueryRequest.Builder; +import org.hypertrace.core.query.service.api.ResultSetMetadata; +import org.hypertrace.core.query.service.api.Value; +import org.hypertrace.core.query.service.util.QueryRequestUtil; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class RequestAnalyzerTest { + private static final Logger LOGGER = LoggerFactory.getLogger(RequestAnalyzerTest.class); + + @Test + 
public void testRepeatedColumns() { + Builder builder = QueryRequest.newBuilder(); + // agg function with alias + Function count = + Function.newBuilder() + .setFunctionName("Count") + .setAlias("myCountAlias") + .addArguments( + Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id"))) + .build(); + builder.addAggregation(Expression.newBuilder().setFunction(count)); + + // agg function without alias + Function minFunction = + Function.newBuilder() + .setFunctionName("MIN") + .addArguments( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.duration"))) + .build(); + builder.addAggregation(Expression.newBuilder().setFunction(minFunction)); + + builder.addSelection( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))); + + builder.addSelection( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))); + + builder.addGroupBy( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))); + QueryRequest queryRequest = builder.build(); + + RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest); + analyzer.analyze(); + ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata(); + System.out.println("resultSetMetadata = " + resultSetMetadata); + + assertNotNull(resultSetMetadata); + assertEquals(3, resultSetMetadata.getColumnMetadataCount()); + assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(0).getColumnName()); + assertEquals("myCountAlias", resultSetMetadata.getColumnMetadata(1).getColumnName()); + assertEquals("MIN", resultSetMetadata.getColumnMetadata(2).getColumnName()); + + // Selections should correspond in size and order to the + // resultSetMetadata.getColumnMetadataList() + assertEquals(3, 
analyzer.getAllSelections().size()); + Iterator selectionsIterator = analyzer.getAllSelections().iterator(); + assertEquals( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")) + .build(), + selectionsIterator.next()); + assertEquals(Expression.newBuilder().setFunction(count).build(), selectionsIterator.next()); + assertEquals( + Expression.newBuilder().setFunction(minFunction).build(), selectionsIterator.next()); + } + + @Test + public void testFiltersWithLiterals() { + Builder builder = QueryRequest.newBuilder(); + builder.addSelection( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))); + Expression expression = + Expression.newBuilder() + .setLiteral(LiteralConstant.newBuilder().setValue(Value.newBuilder().setString("test"))) + .build(); + builder.setFilter( + Filter.newBuilder() + .setLhs( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))) + .setRhs(expression) + .setOperator(Operator.EQ)); + + QueryRequest queryRequest = builder.build(); + + RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest); + analyzer.analyze(); + ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata(); + LOGGER.info("resultSetMetadata = " + resultSetMetadata); + + assertNotNull(resultSetMetadata); + assertEquals(1, resultSetMetadata.getColumnMetadataCount()); + assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(0).getColumnName()); + + // Selections should correspond in size and order to the + // resultSetMetadata.getColumnMetadataList() + assertEquals(1, analyzer.getAllSelections().size()); + Iterator selectionsIterator = analyzer.getAllSelections().iterator(); + assertEquals( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")) + .build(), + 
selectionsIterator.next()); + } + + @Test + public void testReferencedColumns() { + Builder builder = QueryRequest.newBuilder(); + builder.addSelection( + Expression.newBuilder() + .setColumnIdentifier( + ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))); + Expression expression = + Expression.newBuilder() + .setLiteral(LiteralConstant.newBuilder().setValue(Value.newBuilder().setString("test"))) + .build(); + Filter.Builder idFilter = + Filter.newBuilder() + .setLhs( + Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id"))) + .setRhs(expression) + .setOperator(Operator.EQ); + Filter startTimeFilter = + QueryRequestUtil.createTimeFilter( + "Trace.start_time_millis", + Operator.GT, + System.currentTimeMillis() - 1000 * 60 * 60 * 24); + Filter endTimeFilter = + QueryRequestUtil.createTimeFilter( + "Trace.end_time_millis", Operator.LT, System.currentTimeMillis()); + + Filter andFilter = + Filter.newBuilder() + .setOperator(Operator.AND) + .addChildFilter(startTimeFilter) + .addChildFilter(endTimeFilter) + .addChildFilter(idFilter) + .build(); + builder.setFilter(andFilter); + + QueryRequest queryRequest = builder.build(); + + RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest); + analyzer.analyze(); + + Set selectedColumns = analyzer.getSelectedColumns(); + assertNotNull(selectedColumns); + assertEquals(1, selectedColumns.size()); + assertEquals("Trace.transaction_name", selectedColumns.iterator().next()); + + Set referencedColumns = analyzer.getReferencedColumns(); + assertNotNull(referencedColumns); + assertEquals(4, referencedColumns.size()); + assertEquals( + ImmutableSet.of( + "Trace.transaction_name", + "Trace.id", + "Trace.start_time_millis", + "Trace.end_time_millis"), + referencedColumns); + + ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata(); + assertNotNull(resultSetMetadata); + assertEquals(1, resultSetMetadata.getColumnMetadataCount()); + 
    // (continuation of testReferencedColumns: only the selected column appears
    // in the result-set metadata and in getAllSelections(), in matching order)
    assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(0).getColumnName());

    // Selections should correspond in size and order to the
    // resultSetMetadata.getColumnMetadataList()
    assertEquals(1, analyzer.getAllSelections().size());
    Iterator selectionsIterator = analyzer.getAllSelections().iterator();
    assertEquals(
        Expression.newBuilder()
            .setColumnIdentifier(
                ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))
            .build(),
        selectionsIterator.next());
  }

  /**
   * Verifies that getAllSelections() preserves insertion order across group bys,
   * selections and aggregations, matching the result-set metadata column order.
   */
  @Test
  public void testSelectionsLinkedHashSet() {
    Builder builder = QueryRequest.newBuilder();
    // agg function with alias
    Function count =
        Function.newBuilder()
            .setFunctionName("Count")
            .setAlias("myCountAlias")
            .addArguments(
                Expression.newBuilder()
                    .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")))
            .build();
    builder.addAggregation(Expression.newBuilder().setFunction(count));

    // agg function without alias
    Function minFunction =
        Function.newBuilder()
            .setFunctionName("MIN")
            .addArguments(
                Expression.newBuilder()
                    .setColumnIdentifier(
                        ColumnIdentifier.newBuilder().setColumnName("Trace.duration")))
            .build();
    builder.addAggregation(Expression.newBuilder().setFunction(minFunction));

    // Add some selections
    builder.addSelection(
        Expression.newBuilder()
            .setColumnIdentifier(
                ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
    builder.addSelection(
        Expression.newBuilder()
            .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")));

    // A function added to the selections list is treated as a selection
    Function avg =
        Function.newBuilder()
            .setFunctionName("AVG")
            .setAlias("myAvgAlias")
            .addArguments(
                Expression.newBuilder()
                    .setColumnIdentifier(
                        ColumnIdentifier.newBuilder().setColumnName("Trace.duration")))
            .build();
    builder.addSelection(Expression.newBuilder().setFunction(avg));

    // Add some group bys
    builder.addGroupBy(
        Expression.newBuilder()
            .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.api_name")));
    builder.addGroupBy(
        Expression.newBuilder()
            .setColumnIdentifier(
                ColumnIdentifier.newBuilder().setColumnName("Trace.service_name")));
    QueryRequest queryRequest = builder.build();

    RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
    analyzer.analyze();

    // The order in resultSetMetadata.getColumnMetadataList() and selections is group bys,
    // selections then aggregations
    ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata();

    assertNotNull(resultSetMetadata);
    assertEquals(7, resultSetMetadata.getColumnMetadataCount());
    assertEquals("Trace.api_name", resultSetMetadata.getColumnMetadata(0).getColumnName());
    assertEquals("Trace.service_name", resultSetMetadata.getColumnMetadata(1).getColumnName());
    assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(2).getColumnName());
    assertEquals("Trace.id", resultSetMetadata.getColumnMetadata(3).getColumnName());
    assertEquals("myAvgAlias", resultSetMetadata.getColumnMetadata(4).getColumnName());
    assertEquals("myCountAlias", resultSetMetadata.getColumnMetadata(5).getColumnName());
    assertEquals("MIN", resultSetMetadata.getColumnMetadata(6).getColumnName());

    // Selections should correspond in size and order to the
    // resultSetMetadata.getColumnMetadataList()
    assertEquals(7, analyzer.getAllSelections().size());
    Iterator selectionsIterator = analyzer.getAllSelections().iterator();
    assertEquals(
        Expression.newBuilder()
            .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.api_name"))
            .build(),
        selectionsIterator.next());
    assertEquals(
        Expression.newBuilder()
            .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.service_name"))
            .build(),
        selectionsIterator.next());
    assertEquals(
        Expression.newBuilder()
            .setColumnIdentifier(
ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")) + .build(), + selectionsIterator.next()); + assertEquals( + Expression.newBuilder() + .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")) + .build(), + selectionsIterator.next()); + assertEquals(Expression.newBuilder().setFunction(avg).build(), selectionsIterator.next()); + assertEquals(Expression.newBuilder().setFunction(count).build(), selectionsIterator.next()); + assertEquals( + Expression.newBuilder().setFunction(minFunction).build(), selectionsIterator.next()); + } +} diff --git a/query-service-impl/src/test/resources/application.conf b/query-service-impl/src/test/resources/application.conf new file mode 100644 index 00000000..814ac5bc --- /dev/null +++ b/query-service-impl/src/test/resources/application.conf @@ -0,0 +1,66 @@ +service.name = "query-service" +service.port = 8090 +service.admin.port = 8091 +service.config = { + tenantColumnName = "tenant_id" + clients = [ + { + type = broker + connectionString = "pinotCluster0:8099" + } + { + type = zookeeper + connectionString = "pinotCluster1:2181" + } + ] + queryRequestHandlersConfig = [ + { + name = piontCluster0 + type = pinot + clientConfig = broker + requestHandlerInfo = { + viewDefinition = { + viewName = RawTraceView + mapFields = ["tags"] + fieldMap = { + "Trace.id": "trace_id", + "Trace.attributes.services": "services", + "Trace.start_time_millis": "start_time_millis", + "Trace.end_time_millis": "end_time_millis", + "Trace.duration_millis": "duration_millis", + "Trace.metrics.num_services": "num_services", + "Trace.metrics.num_spans": "num_spans", + "Trace.attributes": "attributes", + "Trace.metrics": "metrics" + "Trace.tags": "tags" + } + } + } + } + { + name = span-event-view-handler + type = pinot + clientConfig = zookeeper + requestHandlerInfo = { + viewDefinition = { + viewName = spanEventView + mapFields = ["tags"] + fieldMap = { + "EVENT.serviceName": "service_name", + "EVENT.id": "span_id", + 
"EVENT.startTime": "start_time_millis", + "EVENT.endTime": "end_time_millis", + "EVENT.traceId": "trace_id", + "EVENT.parentSpanId": "parent_span_id", + "EVENT.type": "span_kind", + "EVENT.statusCode": "status_code", + "EVENT.spanTags": "tags" + "EVENT.spanRequestUrl": "request_url", + "EVENT.duration": "duration_millis", + "EVENT.displaySpanName": "display_span_name" + } + } + } + } + ] +} diff --git a/query-service-impl/src/test/resources/log4j2.properties b/query-service-impl/src/test/resources/log4j2.properties new file mode 100644 index 00000000..62c371c3 --- /dev/null +++ b/query-service-impl/src/test/resources/log4j2.properties @@ -0,0 +1,8 @@ +status=error +name=PropertiesConfig +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n +rootLogger.level=INFO +rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/query-service/build.gradle.kts b/query-service/build.gradle.kts new file mode 100644 index 00000000..f960e23f --- /dev/null +++ b/query-service/build.gradle.kts @@ -0,0 +1,27 @@ +plugins { + java + application + id("org.hypertrace.docker-java-application-plugin") version "0.2.2" + id("org.hypertrace.docker-publish-plugin") version "0.2.2" +} + +dependencies { + implementation(project(":query-service-impl")) + implementation("org.hypertrace.core.grpcutils:grpc-server-utils:0.1.0") + implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.2") + implementation("io.grpc:grpc-netty:1.30.2") + + implementation("org.slf4j:slf4j-api:1.7.30") + runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:2.13.3") + + implementation("com.typesafe:config:1.3.2") +} + +application { + mainClassName = "org.hypertrace.core.serviceframework.PlatformServiceLauncher" +} + +// Config for gw run to be able to run this locally. Just execute gw run here on Intellij or on the console. 
+tasks.run { + jvmArgs = listOf("-Dbootstrap.config.uri=file:${projectDir}/src/main/resources/configs", "-Dservice.name=${project.name}") +} diff --git a/query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java b/query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java new file mode 100644 index 00000000..fbebfb8d --- /dev/null +++ b/query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java @@ -0,0 +1,83 @@ +package org.hypertrace.core.query.service; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.io.IOException; +import org.hypertrace.core.grpcutils.server.InterceptorUtil; +import org.hypertrace.core.serviceframework.PlatformService; +import org.hypertrace.core.serviceframework.config.ConfigClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class QueryServiceStarter extends PlatformService { + private static final String SERVICE_NAME_CONFIG = "service.name"; + private static final String SERVICE_PORT_CONFIG = "service.port"; + private static final String QUERY_SERVICE_CONFIG = "service.config"; + private static final Logger LOG = LoggerFactory.getLogger(QueryServiceStarter.class); + private String serviceName; + private int serverPort; + private Server queryServiceServer; + + public QueryServiceStarter(ConfigClient configClient) { + super(configClient); + } + + @Override + protected void doInit() { + this.serviceName = getAppConfig().getString(SERVICE_NAME_CONFIG); + this.serverPort = getAppConfig().getInt(SERVICE_PORT_CONFIG); + + final QueryServiceImplConfig queryServiceImplConfig = + QueryServiceImplConfig.parse(getAppConfig().getConfig(QUERY_SERVICE_CONFIG)); + + LOG.info("Creating the Query Service Server on port {}", serverPort); + + queryServiceServer = + ServerBuilder.forPort(serverPort) + .addService( + InterceptorUtil.wrapInterceptors(new QueryServiceImpl(queryServiceImplConfig))) + .build(); + } + + @Override + 
protected void doStart() { + LOG.info("Attempting to start Query Service on port {}", serverPort); + + try { + queryServiceServer.start(); + LOG.info("Started Query Service on port {}", serverPort); + } catch (IOException e) { + LOG.error("Unable to start the Query Service"); + throw new RuntimeException(e); + } + + try { + queryServiceServer.awaitTermination(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + @Override + protected void doStop() { + LOG.info("Shutting down service: {}", serviceName); + while (!queryServiceServer.isShutdown()) { + queryServiceServer.shutdown(); + try { + Thread.sleep(100); + } catch (InterruptedException ignore) { + } + } + } + + @Override + public boolean healthCheck() { + return true; + } + + @Override + public String getServiceName() { + return serviceName; + } +} diff --git a/query-service/src/main/resources/banner.txt b/query-service/src/main/resources/banner.txt new file mode 100644 index 00000000..d1506ae2 --- /dev/null +++ b/query-service/src/main/resources/banner.txt @@ -0,0 +1,6 @@ +================================================================================ + + Query + +================================================================================ + diff --git a/query-service/src/main/resources/configs/common/application.conf b/query-service/src/main/resources/configs/common/application.conf new file mode 100644 index 00000000..82f29e18 --- /dev/null +++ b/query-service/src/main/resources/configs/common/application.conf @@ -0,0 +1,61 @@ +main.class = org.hypertrace.core.query.service.QueryServiceStarter +service.name = query-service +service.port = 8090 +service.admin.port = 8091 +service.config = { + clients = [ + { + type = zookeeper + connectionString = "localhost:2181/pinot/org-views" + } + ] + queryRequestHandlersConfig = [ + # Update runtime configuration in helm/values.yaml. 
Only local test/debug needs the following + { + name = trace-view-handler + type = pinot + clientConfig = zookeeper + requestHandlerInfo = { + viewDefinition = { + viewName = rawTraceView + fieldMap = { + "TRACE.id": "trace_id", + "TRACE.startTime": "start_time_millis", + "TRACE.endTime": "end_time_millis", + "TRACE.duration": "duration_millis", + "TRACE.numServices": "num_services", + "TRACE.numSpans": "num_spans" + } + } + } + } + { + name = span-event-view-handler + type = pinot + clientConfig = zookeeper + requestHandlerInfo = { + viewDefinition = { + viewName = spanEventView + mapFields = ["tags"] + fieldMap = { + "EVENT.serviceName": "service_name", + "EVENT.id": "span_id", + "EVENT.startTime": "start_time_millis", + "EVENT.endTime": "end_time_millis", + "EVENT.traceId": "trace_id", + "EVENT.parentSpanId": "parent_span_id", + "EVENT.type": "span_kind", + "EVENT.statusCode": "status_code", + "EVENT.spanTags": "tags" + "EVENT.spanRequestUrl": "request_url", + "EVENT.duration": "duration_millis", + "EVENT.displaySpanName": "display_span_name", + } + } + } + } + ] +} + +metrics.reporter.names = ["prometheus"] +metrics.reporter.console.reportInterval = 30 diff --git a/query-service/src/main/resources/log4j2.properties b/query-service/src/main/resources/log4j2.properties new file mode 100644 index 00000000..d91bc7bf --- /dev/null +++ b/query-service/src/main/resources/log4j2.properties @@ -0,0 +1,23 @@ +status=error +name=PropertiesConfig +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n +appender.rolling.type=RollingFile +appender.rolling.name=ROLLING_FILE +appender.rolling.fileName=${sys:service.name:-service}.log +appender.rolling.filePattern=${sys:service.name:-service}-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz +appender.rolling.layout.type=PatternLayout +appender.rolling.layout.pattern=%d{yyyy-MM-dd HH:mm:ss.SSS} 
[%thread] %-5level %c{1.} - %msg%n +appender.rolling.policies.type=Policies +appender.rolling.policies.time.type=TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval=3600 +appender.rolling.policies.time.modulate=true +appender.rolling.policies.size.type=SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=20MB +appender.rolling.strategy.type=DefaultRolloverStrategy +appender.rolling.strategy.max=5 +rootLogger.level=INFO +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.rolling.ref=ROLLING_FILE diff --git a/semantic-build-versioning.gradle b/semantic-build-versioning.gradle new file mode 100644 index 00000000..9bc16767 --- /dev/null +++ b/semantic-build-versioning.gradle @@ -0,0 +1,11 @@ +// Follows https://www.conventionalcommits.org/en/v1.0.0/#summary with one change: any commit is treated as a release, +// patch being the default if major or minor is not detected. + +autobump { + // match any message starting with a type/scope suffixed with !, or with a line starting with "BREAKING CHANGE:" + majorPattern = ~/(?m)(\A[^:]+(?<=!): |^BREAKING CHANGE:)/ + // match any commit message starting with "feat: " or "feat(any scope): " + minorPattern = ~/^feat(\([^)]+\))?: / + newPreReleasePattern = null // Not used - no prereleases + promoteToReleasePattern = null // Not used - every merge is a release +} \ No newline at end of file diff --git a/settings.gradle.kts b/settings.gradle.kts new file mode 100644 index 00000000..cce8556b --- /dev/null +++ b/settings.gradle.kts @@ -0,0 +1,18 @@ +rootProject.name = "query-service" + +pluginManagement { + repositories { + mavenLocal() + gradlePluginPortal() + maven("https://dl.bintray.com/hypertrace/maven") + } +} + +plugins { + id("org.hypertrace.version-settings") version "0.1.1" +} + +include(":query-service-api") +include(":query-service-client") +include(":query-service-impl") +include(":query-service")