diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 00000000..4a06e420
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,137 @@
+version: 2.1
+
+orbs:
+ codecov: codecov/codecov@1
+ snyk: snyk/snyk@0.0.10
+
+executors:
+ gradle_docker:
+ docker:
+ - image: cimg/openjdk:11.0
+ helm:
+ docker:
+ - image: hypertrace/helm-gcs-packager:0.1.1
+
+commands:
+ gradle:
+ description: 'Run the provided gradle command'
+ parameters:
+ args:
+ type: string
+ when:
+ default: "on_success"
+ type: enum
+ enum: ["on_fail", "on_success", "always"]
+ steps:
+ - run:
+ name: << parameters.args >>
+ command: ./gradlew << parameters.args >> --info --max-workers=2 -Dorg.gradle.jvmargs=-Xmx2g -Dorg.gradle.console=plain --continue
+ when: << parameters.when >>
+ setup_build_environment:
+ description: 'Generates cache key from a hash of all gradle files'
+ steps:
+ - checkout
+ - run:
+ name: Generate cache key
+ command: find . -type f -name "*.gradle*" -exec shasum {} + | sort > /tmp/checksum.txt && cat /tmp/checksum.txt
+ - restore_cache:
+ keys:
+ - v1-dependencies-{{ checksum "/tmp/checksum.txt" }}
+ # fallback to using the latest cache if no exact match is found
+ - v1-dependencies-
+ populate_and_save_cache:
+ description: 'Downloads all gradle dependencies and uploads cache for later use'
+ steps:
+ - gradle:
+ args: downloadDependencies
+ - save_cache:
+ paths:
+ - ~/.gradle
+ key: v1-dependencies-{{ checksum "/tmp/checksum.txt" }}
+
+jobs:
+ build:
+ executor: gradle_docker
+ steps:
+ - setup_build_environment
+ - setup_remote_docker
+ - populate_and_save_cache
+ - gradle:
+ args: build dockerBuildImages
+ - gradle:
+ args: jacocoTestReport
+ - codecov/upload:
+ flags: unit
+ publish:
+ executor: gradle_docker
+ steps:
+ - setup_build_environment
+ - setup_remote_docker
+ - gradle:
+ args: :tag -Prelease
+ - gradle:
+ args: publish dockerPushImages
+ - add_ssh_keys:
+ fingerprints:
+ - 'bd:a2:a2:90:46:6c:51:d0:af:8b:1b:c2:d6:e2:f2:e2'
+ - run: git push origin $(./gradlew -q :printVersion)
+ validate-charts:
+ executor: helm
+ steps:
+ - checkout
+ - run:
+ name: Helm Charts Lint and Template Render
+ command: |
+ helm lint --strict ./helm/
+ helm template ./helm/
+ snyk-scan:
+ executor:
+ name: gradle_docker
+ environment:
+ GRADLE_OPTS: -Dorg.gradle.workers.max=1 # Snyk doesn't handle parallelism well
+ steps:
+ - setup_build_environment
+ - snyk/scan:
+ additional-arguments: --all-sub-projects --policy-path=.snyk
+
+ package-charts:
+ executor: helm
+ steps:
+ - checkout
+ - run:
+ name: Package and Publish Helm Charts
+ command: |
+ CHART_VERSION=$(git describe --abbrev=0)
+ CHART_NAME=$(awk '/^name:/ {print $2}' ./helm/Chart.yaml)
+ export GOOGLE_APPLICATION_CREDENTIALS=${HOME}/helm-gcs-key.json
+ echo ${HELM_GCS_CREDENTIALS} > ${GOOGLE_APPLICATION_CREDENTIALS}
+ helm repo add helm-gcs ${HELM_GCS_REPOSITORY}
+ helm package --version ${CHART_VERSION} --app-version ${CHART_VERSION} ./helm/
+ helm gcs push ${CHART_NAME}-${CHART_VERSION}.tgz helm-gcs --public --retry
+
+workflows:
+ version: 2
+ build-and-publish:
+ jobs:
+ - build
+ - validate-charts
+ - snyk-scan:
+ context: hypertrace-vulnerability-scanning
+ - publish:
+ context: hypertrace-publishing
+ requires:
+ - build
+ - validate-charts
+ - snyk-scan
+ filters:
+ branches:
+ only:
+ - master
+ - package-charts:
+ context: hypertrace-publishing
+ requires:
+ - publish
+ filters:
+ branches:
+ only:
+ - master
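For reference, the reusable "gradle" command above splices its args parameter into a fixed wrapper invocation, so the two gradle steps of the build job are equivalent to running the following locally (a sketch assuming a working JDK 11 and Docker daemon; the flags are copied verbatim from the command definition):

    ./gradlew build dockerBuildImages --info --max-workers=2 -Dorg.gradle.jvmargs=-Xmx2g -Dorg.gradle.console=plain --continue
    ./gradlew jacocoTestReport --info --max-workers=2 -Dorg.gradle.jvmargs=-Xmx2g -Dorg.gradle.console=plain --continue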
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..99aa59dc
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,4 @@
+# Each line is a file pattern followed by one or more owners.
+
+# global
+* @buchi-busireddy @tim-mwangi @avinashkolluru @inespo
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..b7a3b059
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,25 @@
+.gradle
+build/
+cscope.*
+.classpath
+.project
+.svn
+target/
+bin/
+*/bin/
+*.iml
+.settings/
+out/
+.DS_Store
+test-output
+*.log
+*.patch
+*.log.gz
+*.code-workspace
+.idea/*.xml
+.idea/libraries/
+.idea/dictionaries/
+.idea/codeStyles/
+.idea/.name
+# Local config to handle using Java 8 vs. Java 11.
+.java-version
\ No newline at end of file
diff --git a/.snyk b/.snyk
new file mode 100644
index 00000000..05716d8d
--- /dev/null
+++ b/.snyk
@@ -0,0 +1,13 @@
+# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
+version: v1.14.1
+# ignores vulnerabilities until expiry date; change duration by modifying expiry date
+ignore:
+ SNYK-JAVA-LOG4J-572732:
+ - '*':
+ reason: no available replacement
+ expires: 2020-07-31T00:00:00.000Z
+ SNYK-JAVA-IONETTY-473694:
+ - '*':
+ reason: no available replacement
+ expires: 2020-07-31T00:00:00.000Z
+patch: {}
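The ignore rules above only take effect when a scan is pointed at this policy file, which is what the snyk-scan CI job does via --policy-path. A minimal local equivalent, assuming the Snyk CLI is installed and authenticated:

    # scan every Gradle sub-project, honouring the ignores declared in .snyk
    snyk test --all-sub-projects --policy-path=.snyk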
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 00000000..cba6f6a1
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,660 @@
+### GNU AFFERO GENERAL PUBLIC LICENSE
+
+Version 3, 19 November 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+### Preamble
+
+The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains
+free software for all its users.
+
+When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing
+under this license.
+
+The precise terms and conditions for copying, distribution and
+modification follow.
+
+### TERMS AND CONDITIONS
+
+#### 0. Definitions.
+
+"This License" refers to version 3 of the GNU Affero General Public
+License.
+
+"Copyright" also means copyright-like laws that apply to other kinds
+of works, such as semiconductor masks.
+
+"The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of
+an exact copy. The resulting work is called a "modified version" of
+the earlier work or a work "based on" the earlier work.
+
+A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user
+through a computer network, with no transfer of a copy, is not
+conveying.
+
+An interactive user interface displays "Appropriate Legal Notices" to
+the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+#### 1. Source Code.
+
+The "source code" for a work means the preferred form of the work for
+making modifications to it. "Object code" means any non-source form of
+a work.
+
+A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+The Corresponding Source need not include anything that users can
+regenerate automatically from other parts of the Corresponding Source.
+
+The Corresponding Source for a work in source code form is that same
+work.
+
+#### 2. Basic Permissions.
+
+All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+You may make, run and propagate covered works that you do not convey,
+without conditions so long as your license otherwise remains in force.
+You may convey covered works to others for the sole purpose of having
+them make modifications exclusively for you, or provide you with
+facilities for running those works, provided that you comply with the
+terms of this License in conveying all material for which you do not
+control copyright. Those thus making or running the covered works for
+you must do so exclusively on your behalf, under your direction and
+control, on terms that prohibit them from making any copies of your
+copyrighted material outside their relationship with you.
+
+Conveying under any other circumstances is permitted solely under the
+conditions stated below. Sublicensing is not allowed; section 10 makes
+it unnecessary.
+
+#### 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such
+circumvention is effected by exercising rights under this License with
+respect to the covered work, and you disclaim any intention to limit
+operation or modification of the work as a means of enforcing, against
+the work's users, your or third parties' legal rights to forbid
+circumvention of technological measures.
+
+#### 4. Conveying Verbatim Copies.
+
+You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+#### 5. Conveying Modified Source Versions.
+
+You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these
+conditions:
+
+- a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+- b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under
+ section 7. This requirement modifies the requirement in section 4
+ to "keep intact all notices".
+- c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+- d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+#### 6. Conveying Non-Source Forms.
+
+You may convey a covered work in object code form under the terms of
+sections 4 and 5, provided that you also convey the machine-readable
+Corresponding Source under the terms of this License, in one of these
+ways:
+
+- a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+- b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the Corresponding
+ Source from a network server at no charge.
+- c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+- d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+- e) Convey the object code using peer-to-peer transmission,
+ provided you inform other peers where the object code and
+ Corresponding Source of the work are being offered to the general
+ public at no charge under subsection 6d.
+
+A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal,
+family, or household purposes, or (2) anything designed or sold for
+incorporation into a dwelling. In determining whether a product is a
+consumer product, doubtful cases shall be resolved in favor of
+coverage. For a particular product received by a particular user,
+"normally used" refers to a typical or common use of that class of
+product, regardless of the status of the particular user or of the way
+in which the particular user actually uses, or expects or is expected
+to use, the product. A product is a consumer product regardless of
+whether the product has substantial commercial, industrial or
+non-consumer uses, unless such uses represent the only significant
+mode of use of the product.
+
+"Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to
+install and execute modified versions of a covered work in that User
+Product from a modified version of its Corresponding Source. The
+information must suffice to ensure that the continued functioning of
+the modified object code is in no case prevented or interfered with
+solely because modification has been made.
+
+If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or
+updates for a work that has been modified or installed by the
+recipient, or for the User Product in which it has been modified or
+installed. Access to a network may be denied when the modification
+itself materially and adversely affects the operation of the network
+or violates the rules and protocols for communication across the
+network.
+
+Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+#### 7. Additional Terms.
+
+"Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders
+of that material) supplement the terms of this License with terms:
+
+- a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+- b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+- c) Prohibiting misrepresentation of the origin of that material,
+ or requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+- d) Limiting the use for publicity purposes of names of licensors
+ or authors of the material; or
+- e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+- f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions
+ of it) with contractual assumptions of liability to the recipient,
+ for any liability that these contractual assumptions directly
+ impose on those licensors and authors.
+
+All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions; the
+above requirements apply either way.
+
+#### 8. Termination.
+
+You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+However, if you cease all violation of this License, then your license
+from a particular copyright holder is reinstated (a) provisionally,
+unless and until the copyright holder explicitly and finally
+terminates your license, and (b) permanently, if the copyright holder
+fails to notify you of the violation by some reasonable means prior to
+60 days after the cessation.
+
+Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+#### 9. Acceptance Not Required for Having Copies.
+
+You are not required to accept this License in order to receive or run
+a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+#### 10. Automatic Licensing of Downstream Recipients.
+
+Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+#### 11. Patents.
+
+A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+A contributor's "essential patent claims" are all patent claims owned
+or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+A patent license is "discriminatory" if it does not include within the
+scope of its coverage, prohibits the exercise of, or is conditioned on
+the non-exercise of one or more of the rights that are specifically
+granted under this License. You may not convey a covered work if you
+are a party to an arrangement with a third party that is in the
+business of distributing software, under which you make payment to the
+third party based on the extent of your activity of conveying the
+work, and under which the third party grants, to any of the parties
+who would receive the covered work from you, a discriminatory patent
+license (a) in connection with copies of the covered work conveyed by
+you (or copies made from those copies), or (b) primarily for and in
+connection with specific products or compilations that contain the
+covered work, unless you entered into that arrangement, or that patent
+license was granted, prior to 28 March 2007.
+
+Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+#### 12. No Surrender of Others' Freedom.
+
+If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under
+this License and any other pertinent obligations, then as a
+consequence you may not convey it at all. For example, if you agree to
+terms that obligate you to collect a royalty for further conveying
+from those to whom you convey the Program, the only way you could
+satisfy both those terms and this License would be to refrain entirely
+from conveying the Program.
+
+#### 13. Remote Network Interaction; Use with the GNU General Public License.
+
+Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your
+version supports such interaction) an opportunity to receive the
+Corresponding Source of your version by providing access to the
+Corresponding Source from a network server at no charge, through some
+standard or customary means of facilitating copying of software. This
+Corresponding Source shall include the Corresponding Source for any
+work covered by version 3 of the GNU General Public License that is
+incorporated pursuant to the following paragraph.
+
+Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+#### 14. Revised Versions of this License.
+
+The Free Software Foundation may publish revised and/or new versions
+of the GNU Affero General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever
+published by the Free Software Foundation.
+
+If the Program specifies that a proxy can decide which future versions
+of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+#### 15. Disclaimer of Warranty.
+
+THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT
+WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
+PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
+DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
+CORRECTION.
+
+#### 16. Limitation of Liability.
+
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR
+CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
+ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT
+NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR
+LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM
+TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER
+PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+#### 17. Interpretation of Sections 15 and 16.
+
+If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+END OF TERMS AND CONDITIONS
+
+### How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these
+terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively state
+the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper
+mail.
+
+If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for
+the specific requirements.
+
+You should also get your employer (if you work as a programmer) or
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. For more information on this, and how to apply and follow
+the GNU AGPL, see <https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..f989bc54
--- /dev/null
+++ b/README.md
@@ -0,0 +1,2 @@
+# query-service
+The Query Service interfaces with the Apache Pinot data store.
diff --git a/build.gradle.kts b/build.gradle.kts
new file mode 100644
index 00000000..e4cf01bc
--- /dev/null
+++ b/build.gradle.kts
@@ -0,0 +1,15 @@
+plugins {
+ id("org.hypertrace.repository-plugin") version "0.1.2"
+ id("org.hypertrace.ci-utils-plugin") version "0.1.1"
+ id("org.hypertrace.publish-plugin") version "0.1.5" apply false
+ id("org.hypertrace.jacoco-report-plugin") version "0.1.0" apply false
+}
+
+subprojects {
+ group = "org.hypertrace.core.query.service"
+ pluginManager.withPlugin("org.hypertrace.publish-plugin") {
+ configure<org.hypertrace.gradle.publishing.HypertracePublishExtension> {
+ license.set(org.hypertrace.gradle.publishing.License.AGPL_V3)
+ }
+ }
+}
diff --git a/gradle.properties b/gradle.properties
new file mode 100644
index 00000000..13e3631b
--- /dev/null
+++ b/gradle.properties
@@ -0,0 +1,5 @@
+org.gradle.parallel=true
+org.gradle.daemon=true
+org.gradle.caching=true
+org.gradle.configureondemand=true
+
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 00000000..cc4fdc29
Binary files /dev/null and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 00000000..4e1cc9db
--- /dev/null
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,5 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.1.1-all.zip
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/gradlew b/gradlew
new file mode 100755
index 00000000..2fe81a7d
--- /dev/null
+++ b/gradlew
@@ -0,0 +1,183 @@
+#!/usr/bin/env sh
+
+#
+# Copyright 2015 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##############################################################################
+##
+## Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "$PRG"`"/$link"
+ fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+ echo "$*"
+}
+
+die () {
+ echo
+ echo "$*"
+ echo
+ exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+ CYGWIN* )
+ cygwin=true
+ ;;
+ Darwin* )
+ darwin=true
+ ;;
+ MINGW* )
+ msys=true
+ ;;
+ NONSTOP* )
+ nonstop=true
+ ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ if [ ! -x "$JAVACMD" ] ; then
+ die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+ fi
+else
+ JAVACMD="java"
+ which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+ MAX_FD_LIMIT=`ulimit -H -n`
+ if [ $? -eq 0 ] ; then
+ if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+ MAX_FD="$MAX_FD_LIMIT"
+ fi
+ ulimit -n $MAX_FD
+ if [ $? -ne 0 ] ; then
+ warn "Could not set maximum file descriptor limit: $MAX_FD"
+ fi
+ else
+ warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+ fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+ GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
+ APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+ CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+ JAVACMD=`cygpath --unix "$JAVACMD"`
+
+ # We build the pattern for arguments to be converted via cygpath
+ ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+ SEP=""
+ for dir in $ROOTDIRSRAW ; do
+ ROOTDIRS="$ROOTDIRS$SEP$dir"
+ SEP="|"
+ done
+ OURCYGPATTERN="(^($ROOTDIRS))"
+ # Add a user-defined pattern to the cygpath arguments
+ if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+ OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+ fi
+ # Now convert the arguments - kludge to limit ourselves to /bin/sh
+ i=0
+ for arg in "$@" ; do
+ CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+ CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
+
+ if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
+ eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+ else
+ eval `echo args$i`="\"$arg\""
+ fi
+ i=`expr $i + 1`
+ done
+ case $i in
+ 0) set -- ;;
+ 1) set -- "$args0" ;;
+ 2) set -- "$args0" "$args1" ;;
+ 3) set -- "$args0" "$args1" "$args2" ;;
+ 4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+ 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+ 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+ 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+ 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+ 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+ esac
+fi
+
+# Escape application args
+save () {
+ for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+ echo " "
+}
+APP_ARGS=`save "$@"`
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+exec "$JAVACMD" "$@"
diff --git a/gradlew.bat b/gradlew.bat
new file mode 100644
index 00000000..9618d8d9
--- /dev/null
+++ b/gradlew.bat
@@ -0,0 +1,100 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto init
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windows variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/helm/.helmignore b/helm/.helmignore
new file mode 100644
index 00000000..fbe01f88
--- /dev/null
+++ b/helm/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
\ No newline at end of file
diff --git a/helm/Chart.yaml b/helm/Chart.yaml
new file mode 100644
index 00000000..e8793c01
--- /dev/null
+++ b/helm/Chart.yaml
@@ -0,0 +1,23 @@
+# This Chart.yaml file will act as the template for the "helm package" command. The "helm package" command will set
+# the chart version and appVersion.
+# Command to package:
+# helm package --version <version> --app-version <app version> ./helm/
+apiVersion: v2
+name: query-service
+description: Query Service Helm Chart
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version. The "helm package" command will take care of setting this.
+# A new chart will be created for each new version of the service.
+version: 0.1.0
+
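Because the version fields are supplied at packaging time, the chart can be exercised locally with the same steps the validate-charts and package-charts jobs run (the version below is illustrative; CI derives it from git describe --abbrev=0):

    helm lint --strict ./helm/
    helm template ./helm/
    helm package --version 0.1.1 --app-version 0.1.1 ./helm/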
diff --git a/helm/templates/deployment.yaml b/helm/templates/deployment.yaml
new file mode 100644
index 00000000..20379496
--- /dev/null
+++ b/helm/templates/deployment.yaml
@@ -0,0 +1,84 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ release: {{ .Release.Name }}
+ {{- with .Values.deploymentLabels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: {{ .Values.maxUnavailable }}
+ selector:
+ matchLabels:
+ {{- toYaml .Values.deploymentSelectorMatchLabels | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ release: {{ .Release.Name }}
+ {{- with .Values.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.containerHealthProbePort }}"
+ checksum/config: {{ include (print $.Template.BasePath "/query-service-config.yaml") . | sha256sum }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: service-config
+ configMap:
+ name: {{ .Values.queryServiceConfig.name }}
+ - name: log4j-config
+ configMap:
+ name: {{ .Values.logConfig.name }}
+ {{- with .Values.nodeLabels }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: grpc-port
+ containerPort: {{ .Values.containerPort }}
+ protocol: TCP
+ - name: health-port
+ containerPort: {{ .Values.containerHealthProbePort }}
+ protocol: TCP
+ env:
+ - name: SERVICE_NAME
+ value: "{{ .Chart.Name }}"
+ - name: BOOTSTRAP_CONFIG_URI
+ value: "file:///app/resources/configs"
+ - name: LOG4J_CONFIGURATION_FILE
+ value: "/var/{{ .Chart.Name }}/log/log4j2.properties"
+ - name: JAVA_TOOL_OPTIONS
+ value: {{ .Values.javaOpts | quote }}
+ volumeMounts:
+ - name: service-config
+ mountPath: /app/resources/configs/{{ .Chart.Name }}/application.conf
+ subPath: application.conf
+ - name: log4j-config
+ mountPath: /var/{{ .Chart.Name }}/log
+ livenessProbe:
+ initialDelaySeconds: {{ int .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ int .Values.livenessProbe.periodSeconds }}
+ tcpSocket:
+ port: grpc-port
+ readinessProbe:
+ initialDelaySeconds: {{ int .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ int .Values.readinessProbe.periodSeconds }}
+ httpGet:
+ path: /health
+ port: {{ .Values.containerHealthProbePort }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
diff --git a/helm/templates/logconfig.yaml b/helm/templates/logconfig.yaml
new file mode 100644
index 00000000..2177b482
--- /dev/null
+++ b/helm/templates/logconfig.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.logConfig.name }}
+ labels:
+ release: {{ .Release.Name }}
+data:
+ log4j2.properties: |-
+ status = error
+ name = PropertiesConfig
+ {{- if .Values.logConfig.monitorInterval }}
+ monitorInterval = {{ .Values.logConfig.monitorInterval }}
+ {{- end }}
+
+ appender.console.type = Console
+ appender.console.name = STDOUT
+ appender.console.layout.type = PatternLayout
+ appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n
+
+ {{- if .Values.logConfig.appender.rolling.enabled }}
+ appender.rolling.type = RollingFile
+ appender.rolling.name = ROLLING_FILE
+ appender.rolling.fileName = ${env:SERVICE_NAME:-service}.log
+ appender.rolling.filePattern = ${env:SERVICE_NAME:-service}-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz
+ appender.rolling.layout.type = PatternLayout
+ appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n
+ appender.rolling.policies.type = Policies
+ appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+ appender.rolling.policies.time.interval = 3600
+ appender.rolling.policies.time.modulate = true
+ appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+ appender.rolling.policies.size.size = 20MB
+ appender.rolling.strategy.type = DefaultRolloverStrategy
+ appender.rolling.strategy.max = 5
+ {{- end }}
+
+ rootLogger.level = {{ .Values.logConfig.rootLogger.level }}
+ rootLogger.appenderRef.stdout.ref = STDOUT
+ {{- if .Values.logConfig.appender.rolling.enabled }}
+ rootLogger.appenderRef.rolling.ref = ROLLING_FILE
+ {{- end }}
+ loggers = PINOT_HANDLER
+ logger.PINOT_HANDLER.name = org.hypertrace.core.query.service.pinot.PinotBasedRequestHandler
+ logger.PINOT_HANDLER.level = INFO
diff --git a/helm/templates/query-service-config.yaml b/helm/templates/query-service-config.yaml
new file mode 100644
index 00000000..3319c60c
--- /dev/null
+++ b/helm/templates/query-service-config.yaml
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.queryServiceConfig.name }}
+ labels:
+ release: {{ .Release.Name }}
+data:
+ application.conf: |-
+ service.config = {
+ tenantColumnName = "{{ .Values.queryServiceConfig.data.tenantColumnName }}"
+ clients = [
+ {
+ type = zookeeper
+ connectionString = "{{ .Values.queryServiceConfig.data.zookeeperConnectionString }}"
+ }
+ ]
+ {{- if .Values.handlers }}
+ queryRequestHandlersConfig = [
+ {{- range .Values.handlers }}
+ {
+{{ tpl . $ | indent 10 }}
+ }
+ {{- end }}
+ {{- range .Values.extraHandlers }}
+ {
+{{ tpl . $ | indent 10 }}
+ }
+ {{- end }}
+ ]
+ {{- end }}
+ }
\ No newline at end of file
diff --git a/helm/templates/service.yaml b/helm/templates/service.yaml
new file mode 100644
index 00000000..e48833d9
--- /dev/null
+++ b/helm/templates/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ release: {{ .Release.Name }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: grpc-port
+ name: grpc-8090
+ selector:
+ {{- toYaml .Values.serviceSelectorLabels | nindent 4 }}
diff --git a/helm/values.yaml b/helm/values.yaml
new file mode 100644
index 00000000..0e3238f5
--- /dev/null
+++ b/helm/values.yaml
@@ -0,0 +1,125 @@
+###########
+# Deployment and Service
+###########
+replicaCount: 1
+maxUnavailable: 0
+
+image:
+ repository: hypertrace/query-service
+ pullPolicy: IfNotPresent
+
+containerPort: 8090
+containerHealthProbePort: 8091
+
+service:
+ type: ClusterIP
+ port: 8090
+
+imagePullSecrets: {}
+
+nodeLabels: {}
+
+javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0"
+
+livenessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 5
+
+readinessProbe:
+ initialDelaySeconds: 2
+ periodSeconds: 5
+
+resources:
+ limits:
+ cpu: 2
+ memory: 768Mi
+ requests:
+ cpu: 100m
+ memory: 768Mi
+
+deploymentLabels:
+ app: query-service
+
+podLabels:
+ app: query-service
+
+deploymentSelectorMatchLabels:
+ app: query-service
+
+serviceSelectorLabels:
+ app: query-service
+
+###########
+# Config Maps
+###########
+queryServiceConfig:
+ name: query-service-config
+ data:
+ zookeeperConnectionString: zookeeper:2181/pinot/my-views
+ tenantColumnName: tenant_id
+
+handlers:
+ - |-
+ name = trace-view-handler
+ type = pinot
+ clientConfig = zookeeper
+ requestHandlerInfo = {
+ viewDefinition = {
+ viewName = rawTraceView
+ fieldMap = {
+ "TRACE.id": "trace_id",
+ "TRACE.name": "transaction_name",
+ "TRACE.startTime": "start_time_millis",
+ "TRACE.endTime": "end_time_millis",
+ "TRACE.transactionName": "transaction_name",
+ "TRACE.services": "services",
+ "TRACE.duration": "duration_millis",
+ "TRACE.numServices": "num_services",
+ "TRACE.numSpans": "num_spans"
+ }
+ }
+ }
+ - |-
+ name = span-event-view-handler
+ type = pinot
+ clientConfig = zookeeper
+ requestHandlerInfo = {
+ viewDefinition = {
+ viewName = spanEventView
+ mapFields = ["tags"]
+ fieldMap = {
+ "EVENT.serviceId": "service_id",
+ "EVENT.serviceName" : "service_name",
+ "EVENT.apiId" : "api_id",
+ "EVENT.apiName" : "api_name",
+ "EVENT.apiTraceId" : "api_trace_id",
+ "EVENT.id" : "span_id",
+ "EVENT.startTime": "start_time_millis",
+ "EVENT.endTime": "end_time_millis",
+ "EVENT.traceId" : "trace_id",
+ "EVENT.parentSpanId" : "parent_span_id",
+ "EVENT.type" : "span_kind",
+ "EVENT.entryApiId": "entry_api_id",
+ "EVENT.protocolName": "protocol_name",
+ "EVENT.statusCode": "status_code",
+ "EVENT.spanTags" : "tags"
+ "EVENT.spanRequestUrl" : "request_url",
+ "EVENT.duration": "duration_millis",
+ "EVENT.displayEntityName": "display_entity_name",
+ "EVENT.displaySpanName": "display_span_name",
+ "EVENT.errorCount": "error_count",
+ "EVENT.exceptionCount": "exception_count"
+ }
+ }
+ }
+
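+# extraHandlers accepts additional handler entries in the same HOCON block format as the
+# handlers list above; both lists are rendered into queryRequestHandlersConfig by the chart.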
+extraHandlers: []
+
+logConfig:
+ name: query-service-log-appender-config
+ monitorInterval: 30
+ rootLogger:
+ level: INFO
+ appender:
+ rolling:
+ enabled: false
diff --git a/query-service-api/README.md b/query-service-api/README.md
new file mode 100644
index 00000000..7c5aa702
--- /dev/null
+++ b/query-service-api/README.md
@@ -0,0 +1,12 @@
+## Generating Golang Client with GRPC support
+The Golang client can currently be generated locally by changing the following properties in `build.gradle.kts`:
+```kotlin
+val generateLocalGoGrpcFiles = true
+
+path = "/bin/protoc-gen-go"
+
+```
+
+Next, run `../gradlew clean build`.
+
+The generated Go files are placed in the `build/generated/source/proto/main/*go` directories.
diff --git a/query-service-api/build.gradle.kts b/query-service-api/build.gradle.kts
new file mode 100644
index 00000000..5a0b2e7c
--- /dev/null
+++ b/query-service-api/build.gradle.kts
@@ -0,0 +1,73 @@
+import com.google.protobuf.gradle.*
+
+plugins {
+ `java-library`
+ id("com.google.protobuf") version "0.8.8"
+ id("org.hypertrace.publish-plugin")
+ id("org.hypertrace.jacoco-report-plugin")
+}
+
+val generateLocalGoGrpcFiles = false
+
+protobuf {
+ protoc {
+ artifact = "com.google.protobuf:protoc:3.12.3"
+ }
+ plugins {
+ // Optional: an artifact spec for a protoc plugin, with "grpc_java" as
+ // the identifier, which can be referred to in the "plugins"
+ // container of the "generateProtoTasks" closure.
+ id("grpc_java") {
+ artifact = "io.grpc:protoc-gen-grpc-java:1.30.2"
+ }
+
+ if (generateLocalGoGrpcFiles) {
+ id("grpc_go") {
+ path = "/bin/protoc-gen-go"
+ }
+ }
+ }
+ generateProtoTasks {
+ ofSourceSet("main").forEach {
+ it.plugins {
+ // Apply the "grpc" plugin whose spec is defined above, without options.
+ id("grpc_java")
+
+ if (generateLocalGoGrpcFiles) {
+ id("grpc_go")
+ }
+ }
+ it.builtins {
+ java
+
+ if (generateLocalGoGrpcFiles) {
+ id("go")
+ }
+ }
+ }
+ }
+}
+
+sourceSets {
+ main {
+ java {
+ srcDirs("src/main/java", "build/generated/source/proto/main/java", "build/generated/source/proto/main/grpc_java")
+ }
+
+ proto {
+ srcDirs("src/main/proto")
+ }
+ }
+}
+
+tasks.test {
+ useJUnitPlatform()
+}
+
+dependencies {
+ api("io.grpc:grpc-protobuf:1.30.2")
+ api("io.grpc:grpc-stub:1.30.2")
+ api("javax.annotation:javax.annotation-api:1.3.2")
+
+ testImplementation("org.junit.jupiter:junit-jupiter:5.6.2")
+}
diff --git a/query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java b/query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java
new file mode 100644
index 00000000..facaf0e2
--- /dev/null
+++ b/query-service-api/src/main/java/org/hypertrace/core/query/service/util/QueryRequestUtil.java
@@ -0,0 +1,191 @@
+package org.hypertrace.core.query.service.util;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.OrderByExpression;
+import org.hypertrace.core.query.service.api.SortOrder;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.api.ValueType;
+
+/**
+ * Utility methods to easily create a {@link org.hypertrace.core.query.service.api.QueryRequest},
+ * its selections and filters.
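+ *
+ * <p>Illustrative usage (a minimal sketch using the helpers below; the column names are examples):
+ *
+ * <pre>{@code
+ * Filter timeFilter =
+ *     QueryRequestUtil.createBetweenTimesFilter("EVENT.startTime", startMillis, endMillis).build();
+ * Expression.Builder idSelection = QueryRequestUtil.createColumnExpression("EVENT.id");
+ * }</pre>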
+ */
+public class QueryRequestUtil {
+
+ public static Filter createTimeFilter(String columnName, Operator op, long value) {
+ ColumnIdentifier.Builder timeColumn = ColumnIdentifier.newBuilder().setColumnName(columnName);
+ Expression.Builder lhs = Expression.newBuilder().setColumnIdentifier(timeColumn);
+
+ LiteralConstant.Builder constant =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setValueType(ValueType.STRING).setString(String.valueOf(value)));
+ Expression.Builder rhs = Expression.newBuilder().setLiteral(constant);
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ public static Filter.Builder createBetweenTimesFilter(
+ String columnName, long lower, long higher) {
+ return Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(createTimeFilter(columnName, Operator.GE, lower))
+ .addChildFilter(createTimeFilter(columnName, Operator.LT, higher));
+ }
+
+ /** Given one or more column names, creates and returns an expression to select count over those columns. */
+ public static Expression.Builder createCountByColumnSelection(String... columnNames) {
+ Function.Builder count =
+ Function.newBuilder()
+ .setFunctionName("Count")
+ .addAllArguments(
+ Arrays.stream(columnNames)
+ .map(
+ columnName ->
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName(columnName))
+ .build())
+ .collect(Collectors.toList()));
+ return Expression.newBuilder().setFunction(count);
+ }
+
+ public static Filter.Builder createColumnValueFilter(
+ String columnName, Operator operator, String value) {
+ ColumnIdentifier.Builder column = ColumnIdentifier.newBuilder().setColumnName(columnName);
+ Expression.Builder lhs = Expression.newBuilder().setColumnIdentifier(column);
+
+ LiteralConstant.Builder constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.STRING).setString(value));
+ Expression.Builder rhs = Expression.newBuilder().setLiteral(constant);
+ return Filter.newBuilder().setLhs(lhs).setOperator(operator).setRhs(rhs);
+ }
+
+ public static Filter.Builder createBooleanFilter(Operator operator, List<Filter> childFilters) {
+ return Filter.newBuilder().setOperator(operator).addAllChildFilter(childFilters);
+ }
+
+ public static Filter.Builder createValueInFilter(String columnName, Collection<String> values) {
+ return createValuesOpFilter(columnName, values, Operator.IN);
+ }
+
+ public static Expression.Builder createColumnExpression(String columnName) {
+ return Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName(columnName));
+ }
+
+ public static Expression.Builder createFunctionExpression(
+ String functionName, Expression... expressions) {
+ Function.Builder functionBuilder = Function.newBuilder().setFunctionName(functionName);
+ for (Expression e : expressions) {
+ functionBuilder.addArguments(e);
+ }
+
+ return Expression.newBuilder().setFunction(functionBuilder);
+ }
+
+ public static Expression.Builder createStringLiteralExpression(String value) {
+ LiteralConstant.Builder constant =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setValueType(ValueType.STRING).setString(String.valueOf(value)));
+ Expression.Builder expression = Expression.newBuilder().setLiteral(constant);
+
+ return expression;
+ }
+
+ public static Expression.Builder createLongLiteralExpression(Long value) {
+ LiteralConstant.Builder constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.LONG).setLong(value));
+ Expression.Builder expression = Expression.newBuilder().setLiteral(constant);
+
+ return expression;
+ }
+
+ public static OrderByExpression.Builder createOrderByExpression(
+ String columnName, SortOrder order) {
+ return OrderByExpression.newBuilder()
+ .setOrder(order)
+ .setExpression(createColumnExpression(columnName));
+ }
+
+ public static Function.Builder createTimeColumnGroupByFunction(
+ String timeColumn, long periodSecs) {
+ return Function.newBuilder()
+ .setFunctionName("dateTimeConvert")
+ .addArguments(QueryRequestUtil.createColumnExpression(timeColumn))
+ .addArguments(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ org.hypertrace.core.query.service.api.Value.newBuilder()
+ .setString("1:MILLISECONDS:EPOCH"))))
+ .addArguments(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ org.hypertrace.core.query.service.api.Value.newBuilder()
+ .setString("1:MILLISECONDS:EPOCH"))))
+ .addArguments(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ org.hypertrace.core.query.service.api.Value.newBuilder()
+ .setString(periodSecs + ":SECONDS"))));
+ }
+
+ public static Filter createValueEQFilter(List<String> idColumns, List<String> idColumnsValues) {
+ if (idColumns.size() != idColumnsValues.size()) {
+ throw new IllegalArgumentException(
+ String.format(
+ "Literal for composite id column doesn't have required number of values."
+ + " Invalid idColumnsValues:%s for idColumns:%s",
+ idColumnsValues, idColumns));
+ }
+ List<Filter> childFilters =
+ IntStream.range(0, idColumnsValues.size())
+ .mapToObj(
+ i ->
+ Filter.newBuilder()
+ .setLhs(createColumnExpression(idColumns.get(i)))
+ .setOperator(Operator.EQ)
+ .setRhs(getLiteralExpression(idColumnsValues.get(i)))
+ .build())
+ .collect(Collectors.toList());
+ return Filter.newBuilder().setOperator(Operator.AND).addAllChildFilter(childFilters).build();
+ }
+
+ private static Expression.Builder getLiteralExpression(String value) {
+ return Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString(value).setValueType(ValueType.STRING)));
+ }
+
+ private static Filter.Builder createValuesOpFilter(
+ String columnName, Collection<String> values, Operator op) {
+ ColumnIdentifier.Builder column = ColumnIdentifier.newBuilder().setColumnName(columnName);
+ Expression.Builder lhs = Expression.newBuilder().setColumnIdentifier(column);
+
+ LiteralConstant.Builder constant =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setValueType(ValueType.STRING_ARRAY).addAllStringArray(values));
+ Expression.Builder rhs = Expression.newBuilder().setLiteral(constant);
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs);
+ }
+}
diff --git a/query-service-api/src/main/proto/query-service.proto b/query-service-api/src/main/proto/query-service.proto
new file mode 100644
index 00000000..1990a20d
--- /dev/null
+++ b/query-service-api/src/main/proto/query-service.proto
@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.hypertrace.core.query.service.api";
+option java_outer_classname = "QueryServiceProto";
+
+
+package org.hypertrace.core.query.service;
+
+import "request.proto";
+import "response.proto";
+
+service QueryService {
+ rpc execute (QueryRequest) returns (stream ResultSetChunk) {
+ }
+}
diff --git a/query-service-api/src/main/proto/request.proto b/query-service-api/src/main/proto/request.proto
new file mode 100644
index 00000000..af963458
--- /dev/null
+++ b/query-service-api/src/main/proto/request.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.hypertrace.core.query.service.api";
+option java_outer_classname = "QueryRequestProto";
+
+package org.hypertrace.core.query.service;
+
+import "value.proto";
+
+message Expression {
+ oneof value {
+ ColumnIdentifier columnIdentifier = 1;
+ LiteralConstant literal = 2;
+ Function function = 3;
+ OrderByExpression orderBy = 4;
+ }
+}
+message QueryRequest {
+
+ repeated string source = 1;
+ Filter filter = 2;
+ repeated Expression selection = 3;
+ repeated Expression aggregation = 4;
+ repeated Expression groupBy = 5;
+ repeated OrderByExpression orderBy = 6;
+
+ int32 limit = 7;
+ int32 offset = 8;
+ bool distinctSelections = 9;
+}
+
+message Filter {
+
+ Expression lhs = 1;
+ Operator operator = 2;
+ Expression rhs = 3;
+ repeated Filter childFilter = 4;
+}
+
+enum Operator {
+ AND = 0;
+ OR = 1;
+ NOT = 2;
+ EQ = 3;
+ NEQ = 4;
+ IN = 5;
+ NOT_IN = 6;
+ RANGE = 7;
+ GT = 8;
+ LT = 9;
+ GE = 10;
+ LE = 11;
+ LIKE = 12;
+ CONTAINS_KEY = 13;
+ CONTAINS_KEYVALUE = 14;
+}
+
+
+message Function {
+ string functionName = 1;
+ repeated Expression arguments = 2;
+ string alias = 3;
+}
+
+message LiteralConstant {
+ Value value = 1;
+}
+
+message ColumnIdentifier {
+
+ string columnName = 1;
+ string alias = 2;
+}
+
+message OrderByExpression {
+ Expression expression = 1;
+ SortOrder order = 2;
+}
+
+enum SortOrder {
+ ASC = 0;
+ DESC = 1;
+}
+
+
diff --git a/query-service-api/src/main/proto/response.proto b/query-service-api/src/main/proto/response.proto
new file mode 100644
index 00000000..af59beaa
--- /dev/null
+++ b/query-service-api/src/main/proto/response.proto
@@ -0,0 +1,38 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.hypertrace.core.query.service.api";
+option java_outer_classname = "QueryResponseProto";
+
+package org.hypertrace.core.query.service;
+
+import "value.proto";
+
+message ColumnMetadata {
+ string column_name = 1;
+ ValueType value_type = 2;
+ // whether the value is an array type
+ bool is_repeated = 3;
+}
+
+message ResultSetMetadata {
+ repeated ColumnMetadata column_metadata = 1;
+}
+
+message ResultSetChunk {
+ int32 chunk_id = 1;
+ bool is_last_chunk = 3;
+ //only present in the first chunk
+ ResultSetMetadata result_set_metadata = 4;
+ repeated Row row = 5;
+
+ //can be in any chunk.
+ bool hasError = 6;
+ string errorMessage = 7;
+}
+
+
+message Row {
+ repeated Value column = 1;
+}
+
diff --git a/query-service-api/src/main/proto/value.proto b/query-service-api/src/main/proto/value.proto
new file mode 100644
index 00000000..9f98cb8f
--- /dev/null
+++ b/query-service-api/src/main/proto/value.proto
@@ -0,0 +1,48 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.hypertrace.core.query.service.api";
+option java_outer_classname = "ValueProto";
+
+package org.hypertrace.core.query.service;
+
+enum ValueType {
+ STRING = 0;
+ LONG = 1;
+ INT = 2;
+ FLOAT = 3;
+ DOUBLE = 4;
+ BYTES = 5;
+ BOOL = 6;
+ TIMESTAMP = 7;
+ STRING_ARRAY = 8;
+ LONG_ARRAY = 9;
+ INT_ARRAY = 10;
+ FLOAT_ARRAY = 11;
+ DOUBLE_ARRAY = 12;
+ BYTES_ARRAY = 13;
+ BOOLEAN_ARRAY = 14;
+ // assumes that key is always string
+ STRING_MAP = 15;
+}
+
+message Value {
+ ValueType valueType = 1;
+
+ string string = 3;
+ int64 long = 4;
+ int32 int = 5;
+ float float = 6;
+ double double = 7;
+ bytes bytes = 8;
+ bool boolean = 9;
+ sfixed64 timestamp = 15;
+ repeated string string_array = 16;
+ repeated int64 long_array = 17;
+ repeated int32 int_array = 18;
+ repeated float float_array = 19;
+ repeated double double_array = 20;
+ repeated bytes bytes_array = 21;
+ repeated bool boolean_array = 22;
+ map<string, string> string_map = 23;
+}
diff --git a/query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java b/query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java
new file mode 100644
index 00000000..204d182f
--- /dev/null
+++ b/query-service-api/src/test/java/org/hypertrace/core/query/service/util/QueryRequestUtilTest.java
@@ -0,0 +1,54 @@
+package org.hypertrace.core.query.service.util;
+
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.api.ValueType;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class QueryRequestUtilTest {
+ @Test
+ public void testCreateBetweenTimesFilter() {
+ Filter.Builder timeFilter =
+ QueryRequestUtil.createBetweenTimesFilter("API.startTime", 20L, 30L);
+ Assertions.assertEquals(
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(
+ Filter.newBuilder()
+ .setLhs(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("API.startTime")))
+ .setOperator(Operator.GE)
+ .setRhs(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder()
+ .setValueType(ValueType.STRING)
+ .setString("20")))))
+ .addChildFilter(
+ Filter.newBuilder()
+ .setLhs(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("API.startTime")))
+ .setOperator(Operator.LT)
+ .setRhs(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder()
+ .setValueType(ValueType.STRING)
+ .setString("30")))))
+ .build(),
+ timeFilter.build());
+ }
+}
diff --git a/query-service-client/build.gradle.kts b/query-service-client/build.gradle.kts
new file mode 100644
index 00000000..9a489c9c
--- /dev/null
+++ b/query-service-client/build.gradle.kts
@@ -0,0 +1,16 @@
+plugins {
+ `java-library`
+ jacoco
+ id("org.hypertrace.publish-plugin")
+ id("org.hypertrace.jacoco-report-plugin")
+}
+
+dependencies {
+ api(project(":query-service-api"))
+ implementation("org.hypertrace.core.grpcutils:grpc-client-utils:0.1.0")
+
+ // Logging
+ implementation("org.slf4j:slf4j-api:1.7.30")
+ // Config
+ implementation("com.typesafe:config:1.3.2")
+}
diff --git a/query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceClient.java b/query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceClient.java
new file mode 100644
index 00000000..1a2e2082
--- /dev/null
+++ b/query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceClient.java
@@ -0,0 +1,52 @@
+package org.hypertrace.core.query.service.client;
+
+import io.grpc.Deadline;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.hypertrace.core.grpcutils.client.GrpcClientRequestContextUtil;
+import org.hypertrace.core.grpcutils.client.RequestContextClientCallCredsProviderFactory;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.QueryServiceGrpc;
+import org.hypertrace.core.query.service.api.QueryServiceGrpc.QueryServiceBlockingStub;
+import org.hypertrace.core.query.service.api.ResultSetChunk;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
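+/**
+ * Thin gRPC client for the QueryService. It builds a plaintext channel from the configured
+ * host/port and attaches the request-context call credentials so request-context headers are
+ * propagated with each call.
+ */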
+public class QueryServiceClient {
+ private static final Logger LOG = LoggerFactory.getLogger(QueryServiceClient.class);
+ /**
+ * Since Pinot truncates GroupBy results to 10 by default, we set a higher limit for queries that
+ * need more than 10 values or all of the results. This may need to be raised further, but we
+ * start with a reasonably small value.
+ */
+ public static final int DEFAULT_QUERY_SERVICE_GROUP_BY_LIMIT = 10000;
+
+ private final QueryServiceBlockingStub queryServiceClient;
+
+ public QueryServiceClient(QueryServiceConfig queryServiceConfig) {
+ ManagedChannel managedChannel =
+ ManagedChannelBuilder.forAddress(
+ queryServiceConfig.getQueryServiceHost(), queryServiceConfig.getQueryServicePort())
+ .usePlaintext()
+ .build();
+ queryServiceClient =
+ QueryServiceGrpc.newBlockingStub(managedChannel)
+ .withCallCredentials(
+ RequestContextClientCallCredsProviderFactory.getClientCallCredsProvider().get());
+ }
+
+ public Iterator<ResultSetChunk> executeQuery(
+ QueryRequest request, Map<String, String> context, int timeoutMillis) {
+ LOG.debug(
+ "Sending query to query service with timeout: {}, and request: {}", timeoutMillis, request);
+ return GrpcClientRequestContextUtil.executeWithHeadersContext(
+ context,
+ () ->
+ queryServiceClient
+ .withDeadline(Deadline.after(timeoutMillis, TimeUnit.MILLISECONDS))
+ .execute(request));
+ }
+}
diff --git a/query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceConfig.java b/query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceConfig.java
new file mode 100644
index 00000000..ecb05e70
--- /dev/null
+++ b/query-service-client/src/main/java/org/hypertrace/core/query/service/client/QueryServiceConfig.java
@@ -0,0 +1,27 @@
+package org.hypertrace.core.query.service.client;
+
+import com.typesafe.config.Config;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Config object used to pass the QueryService details that are to be used by the EntityGateway. */
+public class QueryServiceConfig {
+ private static final Logger LOG = LoggerFactory.getLogger(QueryServiceConfig.class);
+
+ private final String queryServiceHost;
+ private final int queryServicePort;
+
+ public QueryServiceConfig(Config config) {
+ LOG.info(config.toString());
+ this.queryServiceHost = config.getString("host");
+ this.queryServicePort = config.getInt("port");
+ }
+
+ public String getQueryServiceHost() {
+ return this.queryServiceHost;
+ }
+
+ public int getQueryServicePort() {
+ return queryServicePort;
+ }
+}
diff --git a/query-service-impl/README.md b/query-service-impl/README.md
new file mode 100644
index 00000000..35e4f3c4
--- /dev/null
+++ b/query-service-impl/README.md
@@ -0,0 +1,17 @@
+# Query Service
+
+Run it with:
+
+```
+../gradlew run
+```
+
+Test the client using the integration test (this requires an E2E platform setup with sample data):
+
+```
+sh run-integration-tests.sh
+```
+
+Sample result:
+```
+[Test worker] INFO org.hypertrace.core.query.service.QueryClientTest - traceId: "G\025\235\255O\306i\270\225\332wL\036\231\245\234"
+spanId: "+q\346|\2175\245\207"
+process: "{service_name=frontend, tags=[]}"
+operationName: "Sent.hipstershop.ProductCatalogService.ListProducts"
+```
diff --git a/query-service-impl/build.gradle.kts b/query-service-impl/build.gradle.kts
new file mode 100644
index 00000000..28ab1ad6
--- /dev/null
+++ b/query-service-impl/build.gradle.kts
@@ -0,0 +1,42 @@
+plugins {
+ `java-library`
+ jacoco
+ id("org.hypertrace.jacoco-report-plugin")
+}
+
+tasks.test {
+ useJUnitPlatform()
+}
+
+dependencies {
+ constraints {
+ implementation("com.fasterxml.jackson.core:jackson-databind:2.11.0") {
+ because("Deserialization of Untrusted Data [High Severity][https://snyk.io/vuln/SNYK-JAVA-COMFASTERXMLJACKSONCORE-561587] in com.fasterxml.jackson.core:jackson-databind@2.9.8\n" +
+ " used by org.apache.pinot:pinot-java-client")
+ }
+ implementation("io.netty:netty:3.10.3.Final") {
+ because("HTTP Request Smuggling [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-IONETTY-473694] in io.netty:netty@3.9.6.Final\n" +
+ " introduced by org.apache.pinot:pinot-java-client")
+ }
+ implementation("org.apache.zookeeper:zookeeper:3.6.1") {
+ because("Authentication Bypass [High Severity][https://snyk.io/vuln/SNYK-JAVA-ORGAPACHEZOOKEEPER-32301] in org.apache.zookeeper:zookeeper@3.4.6\n" +
+ " introduced by org.apache.pinot:pinot-java-client")
+ }
+ implementation("commons-codec:commons-codec:1.13") {
+ because("Information Exposure [Low Severity][https://snyk.io/vuln/SNYK-JAVA-COMMONSCODEC-561518] in commons-codec:commons-codec@1.11"
+ + " introduced org.apache.httpcomponents:httpclient@4.5.12")
+ }
+ }
+ api(project(":query-service-api"))
+ implementation("org.hypertrace.core.grpcutils:grpc-context-utils:0.1.0")
+ implementation("org.apache.pinot:pinot-java-client:0.3.0") {
+ // We want to use log4j2 impl so exclude the log4j binding of slf4j
+ exclude("org.slf4j", "slf4j-log4j12")
+ }
+ implementation("org.slf4j:slf4j-api:1.7.30")
+ implementation("com.typesafe:config:1.3.2")
+
+ testImplementation(project(":query-service-api"))
+ testImplementation("org.junit.jupiter:junit-jupiter:5.6.2")
+ testImplementation("org.mockito:mockito-core:3.3.3")
+}
diff --git a/query-service-impl/config.yml b/query-service-impl/config.yml
new file mode 100644
index 00000000..0d047ff4
--- /dev/null
+++ b/query-service-impl/config.yml
@@ -0,0 +1,11 @@
+logging:
+ level: INFO
+ loggers:
+ org.hypertrace.core.query.service: INFO
+server:
+ applicationConnectors:
+ - type: http
+ port: 8080
+ adminConnectors:
+ - type: http
+ port: 8081
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java
new file mode 100644
index 00000000..575ee486
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryContext.java
@@ -0,0 +1,18 @@
+package org.hypertrace.core.query.service;
+
+/**
+ * Class to hold context for a query from the incoming request. We maintain a separate class for
+ * QueryService so that the context for this service can evolve independently of the platform
+ * RequestContext class.
+ */
+public class QueryContext {
+ private final String tenantId;
+
+ public QueryContext(String tenantId) {
+ this.tenantId = tenantId;
+ }
+
+ public String getTenantId() {
+ return tenantId;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java
new file mode 100644
index 00000000..86908a62
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryCost.java
@@ -0,0 +1,32 @@
+package org.hypertrace.core.query.service;
+
+public class QueryCost {
+
+ /**
+ * The cost to evaluate the request: -1 means this handler cannot handle the request; otherwise
+ * the cost ranges from 0 (very cheap) to 1 (very expensive).
+ */
+ double cost;
+ /**
+ * Allows the request handler to return additional context from the RequestHandler.canHandle
+ * method. This is passed back into RequestHandler.handleRequest.
+ */
+ Object context;
+
+ public double getCost() {
+ return cost;
+ }
+
+ public void setCost(double cost) {
+ this.cost = cost;
+ }
+
+ public Object getContext() {
+ return context;
+ }
+
+ public void setContext(Object context) {
+ this.context = context;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java
new file mode 100644
index 00000000..3b6af59e
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryResultCollector.java
@@ -0,0 +1,15 @@
+package org.hypertrace.core.query.service;
+
+/** Interface which is passed as a callback to {@link RequestHandler} */
+public interface QueryResultCollector<T> {
+
+ /**
+ * Collect and handle the response received in T.
+ *
+ * @param t One of the items in the result.
+ */
+ void collect(T t);
+
+ /** Finish collecting all the results and wrap up the query. */
+ void finish();
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImpl.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImpl.java
new file mode 100644
index 00000000..98afe6e7
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImpl.java
@@ -0,0 +1,108 @@
+package org.hypertrace.core.query.service;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.stream.Collectors;
+import org.hypertrace.core.grpcutils.context.RequestContext;
+import org.hypertrace.core.query.service.QueryServiceImplConfig.ClientConfig;
+import org.hypertrace.core.query.service.QueryServiceImplConfig.RequestHandlerConfig;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.QueryServiceGrpc;
+import org.hypertrace.core.query.service.api.ResultSetChunk;
+import org.hypertrace.core.query.service.pinot.PinotBasedRequestHandler;
+import org.hypertrace.core.query.service.pinot.PinotClientFactory;
+import org.hypertrace.core.query.service.pinot.ViewDefinition;
+
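+/**
+ * gRPC implementation of the QueryService. At construction time it registers the configured
+ * request handlers (currently only the Pinot-based handler) along with their Pinot clients; each
+ * incoming query is analyzed and routed to the lowest-cost handler chosen by the
+ * RequestHandlerSelector.
+ */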
+public class QueryServiceImpl extends QueryServiceGrpc.QueryServiceImplBase {
+
+ private static final org.slf4j.Logger LOG =
+ org.slf4j.LoggerFactory.getLogger(QueryServiceImpl.class);
+
+ private final RequestHandlerSelector selector;
+
+ public QueryServiceImpl(QueryServiceImplConfig config) {
+ Map<String, ClientConfig> clientConfigMap =
+ config.getClients().stream()
+ .map(ClientConfig::parse)
+ .collect(Collectors.toMap(ClientConfig::getType, clientConfig -> clientConfig));
+ String tenantColumnName = config.getTenantColumnName();
+
+ if (tenantColumnName == null || tenantColumnName.isBlank()) {
+ throw new RuntimeException(
+ "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config.");
+ }
+
+ for (Config requestHandlerConfig : config.getQueryRequestHandlersConfig()) {
+ initRequestHandler(
+ RequestHandlerConfig.parse(requestHandlerConfig), clientConfigMap, tenantColumnName);
+ }
+ selector = new RequestHandlerSelector(RequestHandlerRegistry.get());
+ }
+
+ private void initRequestHandler(
+ RequestHandlerConfig config,
+ Map<String, ClientConfig> clientConfigMap,
+ String tenantColumnName) {
+
+ // Register Pinot RequestHandler
+ if ("pinot".equals(config.getType())) {
+ Map<String, Object> requestHandlerInfoConf = new HashMap<>();
+ requestHandlerInfoConf.put(
+ PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY,
+ ViewDefinition.parse(
+ (Map)
+ config
+ .getRequestHandlerInfo()
+ .get(PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY),
+ tenantColumnName));
+ RequestHandlerRegistry.get()
+ .register(
+ config.getName(),
+ new RequestHandlerInfo(
+ config.getName(), PinotBasedRequestHandler.class, requestHandlerInfoConf));
+ } else {
+ throw new UnsupportedOperationException(
+ "Unsupported RequestHandler type - " + config.getType());
+ }
+
+ // Register Pinot Client
+ ClientConfig clientConfig = clientConfigMap.get(config.getClientConfig());
+ Preconditions.checkNotNull(clientConfig);
+ PinotClientFactory.createPinotClient(
+ config.getName(), clientConfig.getType(), clientConfig.getConnectionString());
+ }
+
+ @Override
+ public void execute(QueryRequest queryRequest, StreamObserver<ResultSetChunk> responseObserver) {
+ try {
+ RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
+ analyzer.analyze();
+ RequestHandler requestHandler = selector.select(queryRequest, analyzer);
+ if (requestHandler == null) {
+ // An error is logged in the select() method
+ responseObserver.onError(
+ Status.NOT_FOUND
+ .withDescription("Could not find any handler to handle the request")
+ .asException());
+ return;
+ }
+
+ ResultSetChunkCollector collector = new ResultSetChunkCollector(responseObserver);
+ collector.init(analyzer.getResultSetMetadata());
+
+ String tenantId = RequestContext.CURRENT.get().getTenantId().get();
+ requestHandler.handleRequest(new QueryContext(tenantId), queryRequest, collector, analyzer);
+ } catch (NoSuchElementException e) {
+ LOG.error("TenantId is missing in the context.", e);
+ responseObserver.onError(e);
+ } catch (Exception e) {
+ LOG.error("Error processing request: {}", queryRequest, e);
+ responseObserver.onError(e);
+ }
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImplConfig.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImplConfig.java
new file mode 100644
index 00000000..179d0742
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/QueryServiceImplConfig.java
@@ -0,0 +1,110 @@
+package org.hypertrace.core.query.service;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigBeanFactory;
+import java.util.List;
+import java.util.Map;
+
+public class QueryServiceImplConfig {
+ private String tenantColumnName;
+ private List<Config> clients;
+ private List<Config> queryRequestHandlersConfig;
+
+ public static QueryServiceImplConfig parse(Config config) {
+ return ConfigBeanFactory.create(config, QueryServiceImplConfig.class);
+ }
+
+ public String getTenantColumnName() {
+ return tenantColumnName;
+ }
+
+ public List<Config> getClients() {
+ return this.clients;
+ }
+
+ public void setClients(List<Config> clients) {
+ this.clients = clients;
+ }
+
+ public List<Config> getQueryRequestHandlersConfig() {
+ return this.queryRequestHandlersConfig;
+ }
+
+ public void setQueryRequestHandlersConfig(List<Config> queryRequestHandlersConfig) {
+ this.queryRequestHandlersConfig = queryRequestHandlersConfig;
+ }
+
+ public void setTenantColumnName(String tenantColumnName) {
+ this.tenantColumnName = tenantColumnName;
+ }
+
+ public static class RequestHandlerConfig {
+
+ private String name;
+ private String type;
+ private String clientConfig;
+ private Map<String, Object> requestHandlerInfo;
+
+ public static RequestHandlerConfig parse(Config config) {
+ return ConfigBeanFactory.create(config, RequestHandlerConfig.class);
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public String getClientConfig() {
+ return clientConfig;
+ }
+
+ public void setClientConfig(String clientConfig) {
+ this.clientConfig = clientConfig;
+ }
+
+ public Map<String, Object> getRequestHandlerInfo() {
+ return requestHandlerInfo;
+ }
+
+ public void setRequestHandlerInfo(Map<String, Object> requestHandlerInfo) {
+ this.requestHandlerInfo = requestHandlerInfo;
+ }
+ }
+
+ public static class ClientConfig {
+
+ private String type;
+ private String connectionString;
+
+ public static ClientConfig parse(Config config) {
+ return ConfigBeanFactory.create(config, ClientConfig.class);
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public String getConnectionString() {
+ return connectionString;
+ }
+
+ public void setConnectionString(String connectionString) {
+ this.connectionString = connectionString;
+ }
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestAnalyzer.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestAnalyzer.java
new file mode 100644
index 00000000..3cf894ab
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestAnalyzer.java
@@ -0,0 +1,170 @@
+package org.hypertrace.core.query.service;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.ColumnMetadata;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Expression.ValueCase;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.OrderByExpression;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.ResultSetMetadata;
+import org.hypertrace.core.query.service.api.ValueType;
+
+public class RequestAnalyzer {
+
+ private QueryRequest request;
+ private Set<String> referencedColumns;
+ private LinkedHashSet<String> selectedColumns;
+ private ResultSetMetadata resultSetMetadata;
+ // Contains all selections to be made in the DB: selections on group by, single columns and
+ // aggregations, in that order. There should be a one-to-one mapping between this and the
+ // columnMetadataSet in ResultSetMetadata. The difference from selectedColumns above is that
+ // this is a set of Expressions while selectedColumns is a set of column names.
+ private final LinkedHashSet<Expression> allSelections;
+
+ public RequestAnalyzer(QueryRequest request) {
+ this.request = request;
+ this.selectedColumns = new LinkedHashSet<>();
+ this.allSelections = new LinkedHashSet<>();
+ }
+
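+ /**
+ * Walks the filter tree breadth-first to collect the columns referenced in filters, then records
+ * the group-by, selection and aggregation expressions (in that order) to build the result set
+ * metadata and the full selection list.
+ */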
+ public void analyze() {
+ List<String> filterColumns = new ArrayList<>();
+ LinkedList<Filter> filterQueue = new LinkedList<>();
+ filterQueue.add(request.getFilter());
+ while (!filterQueue.isEmpty()) {
+ Filter filter = filterQueue.pop();
+ if (filter.getChildFilterCount() > 0) {
+ for (Filter childFilter : filter.getChildFilterList()) {
+ filterQueue.add(childFilter);
+ }
+ } else {
+ extractColumns(filterColumns, filter.getLhs());
+ extractColumns(filterColumns, filter.getRhs());
+ }
+ }
+ List<String> postFilterColumns = new ArrayList<>();
+ List<String> selectedList = new ArrayList<>();
+ LinkedHashSet<ColumnMetadata> columnMetadataSet = new LinkedHashSet<>();
+
+ // group by columns must be first in the response
+ if (request.getGroupByCount() > 0) {
+ for (Expression expression : request.getGroupByList()) {
+ extractColumns(postFilterColumns, expression);
+ columnMetadataSet.add(toColumnMetadata(expression));
+ allSelections.add(expression);
+ }
+ }
+ if (request.getSelectionCount() > 0) {
+ for (Expression expression : request.getSelectionList()) {
+ extractColumns(selectedList, expression);
+ postFilterColumns.addAll(selectedList);
+ columnMetadataSet.add(toColumnMetadata(expression));
+ allSelections.add(expression);
+ }
+ }
+ if (request.getAggregationCount() > 0) {
+ for (Expression expression : request.getAggregationList()) {
+ extractColumns(postFilterColumns, expression);
+ columnMetadataSet.add(toColumnMetadata(expression));
+ allSelections.add(expression);
+ }
+ }
+
+ referencedColumns = new HashSet<>();
+ referencedColumns.addAll(filterColumns);
+ referencedColumns.addAll(postFilterColumns);
+ resultSetMetadata =
+ ResultSetMetadata.newBuilder().addAllColumnMetadata(columnMetadataSet).build();
+ selectedColumns.addAll(selectedList);
+ }
+
+ private ColumnMetadata toColumnMetadata(Expression expression) {
+ ColumnMetadata.Builder builder = ColumnMetadata.newBuilder();
+ ValueCase valueCase = expression.getValueCase();
+ switch (valueCase) {
+ case COLUMNIDENTIFIER:
+ ColumnIdentifier columnIdentifier = expression.getColumnIdentifier();
+ String alias = columnIdentifier.getAlias();
+ if (alias != null && alias.trim().length() > 0) {
+ builder.setColumnName(alias);
+ } else {
+ builder.setColumnName(columnIdentifier.getColumnName());
+ }
+ builder.setValueType(ValueType.STRING);
+ builder.setIsRepeated(false);
+ break;
+ case LITERAL:
+ break;
+ case FUNCTION:
+ Function function = expression.getFunction();
+ alias = function.getAlias();
+ if (alias != null && alias.trim().length() > 0) {
+ builder.setColumnName(alias);
+ } else {
+ // todo: handle recursive functions, e.g. max(rollup(time, 50))
+ // workaround is to use alias for now
+ builder.setColumnName(function.getFunctionName());
+ }
+ builder.setValueType(ValueType.STRING);
+ builder.setIsRepeated(false);
+ break;
+ case ORDERBY:
+ break;
+ case VALUE_NOT_SET:
+ break;
+ }
+ return builder.build();
+ }
+
+ private void extractColumns(List<String> columns, Expression expression) {
+ ValueCase valueCase = expression.getValueCase();
+ switch (valueCase) {
+ case COLUMNIDENTIFIER:
+ ColumnIdentifier columnIdentifier = expression.getColumnIdentifier();
+ columns.add(columnIdentifier.getColumnName());
+ break;
+ case LITERAL:
+ // no columns
+ break;
+ case FUNCTION:
+ Function function = expression.getFunction();
+ for (Expression childExpression : function.getArgumentsList()) {
+ extractColumns(columns, childExpression);
+ }
+ break;
+ case ORDERBY:
+ OrderByExpression orderBy = expression.getOrderBy();
+ extractColumns(columns, orderBy.getExpression());
+ break;
+ case VALUE_NOT_SET:
+ break;
+ }
+ }
+
+ public Set<String> getReferencedColumns() {
+ return referencedColumns;
+ }
+
+ public ResultSetMetadata getResultSetMetadata() {
+ return resultSetMetadata;
+ }
+
+ public LinkedHashSet<String> getSelectedColumns() {
+ return selectedColumns;
+ }
+
+ public LinkedHashSet<Expression> getAllSelections() {
+ return this.allSelections;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandler.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandler.java
new file mode 100644
index 00000000..1409edb9
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandler.java
@@ -0,0 +1,22 @@
+package org.hypertrace.core.query.service;
+
+import java.util.Map;
+import java.util.Set;
+import org.hypertrace.core.query.service.api.QueryRequest;
+
+public interface RequestHandler<T, R> {
+
+ /** Get the name of Request Handler */
+ String getName();
+
+ QueryCost canHandle(T request, Set<String> referencedSources, Set<String> referencedColumns);
+
+ /** Handle the request and add rows to the collector. */
+ void handleRequest(
+ QueryContext queryContext,
+ QueryRequest request,
+ QueryResultCollector<R> collector,
+ RequestAnalyzer requestAnalyzer);
+
+ void init(String name, Map<String, Object> config);
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerInfo.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerInfo.java
new file mode 100644
index 00000000..20a8fd7d
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerInfo.java
@@ -0,0 +1,34 @@
+package org.hypertrace.core.query.service;
+
+import java.util.Map;
+
+public class RequestHandlerInfo {
+
+ private String name;
+
+ private Class<? extends RequestHandler> requestHandlerClazz;
+
+ // todo:change to concrete class later
+ private Map<String, Object> config;
+
+ public RequestHandlerInfo(
+ String name,
+ Class<? extends RequestHandler> requestHandlerClazz,
+ Map<String, Object> config) {
+ this.name = name;
+ this.requestHandlerClazz = requestHandlerClazz;
+ this.config = config;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public Class<? extends RequestHandler> getRequestHandlerClazz() {
+ return requestHandlerClazz;
+ }
+
+ public Map<String, Object> getConfig() {
+ return config;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerRegistry.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerRegistry.java
new file mode 100644
index 00000000..b4956179
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerRegistry.java
@@ -0,0 +1,34 @@
+package org.hypertrace.core.query.service;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import org.slf4j.LoggerFactory;
+
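+/**
+ * Singleton registry of {@link RequestHandlerInfo} keyed by handler name. Registering a duplicate
+ * handler name fails and is logged as an error.
+ */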
+public class RequestHandlerRegistry {
+
+ private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(RequestHandlerRegistry.class);
+
+ Map<String, RequestHandlerInfo> requestHandlerInfoMap = new HashMap<>();
+
+ private static final RequestHandlerRegistry INSTANCE = new RequestHandlerRegistry();
+
+ private RequestHandlerRegistry() {}
+
+ public boolean register(String handlerName, RequestHandlerInfo requestHandlerInfo) {
+ if (requestHandlerInfoMap.containsKey(handlerName)) {
+ LOG.error("RequestHandlerInfo registration failed. Duplicate Handler:{} ", handlerName);
+ return false;
+ }
+ requestHandlerInfoMap.put(handlerName, requestHandlerInfo);
+ return true;
+ }
+
+ public Collection<RequestHandlerInfo> getAll() {
+ return requestHandlerInfoMap.values();
+ }
+
+ public static RequestHandlerRegistry get() {
+ return INSTANCE;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerSelector.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerSelector.java
new file mode 100644
index 00000000..6bdf0fbb
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/RequestHandlerSelector.java
@@ -0,0 +1,76 @@
+package org.hypertrace.core.query.service;
+
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
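+/**
+ * Selects the request handler for a query by asking each registered handler for its cost via
+ * canHandle and picking the handler with the lowest non-negative cost.
+ */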
+public class RequestHandlerSelector {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RequestHandlerSelector.class);
+
+ List<RequestHandler> requestHandlers = new ArrayList<>();
+
+ public RequestHandlerSelector(List<RequestHandler> requestHandlers) {
+ this.requestHandlers = requestHandlers;
+ }
+
+ public RequestHandlerSelector(RequestHandlerRegistry registry) {
+ Collection<RequestHandlerInfo> requestHandlerInfoList = registry.getAll();
+ for (RequestHandlerInfo requestHandlerInfo : requestHandlerInfoList) {
+ try {
+ Constructor<? extends RequestHandler> constructor =
+ requestHandlerInfo.getRequestHandlerClazz().getConstructor(new Class[] {});
+ RequestHandler requestHandler = constructor.newInstance();
+ requestHandler.init(requestHandlerInfo.getName(), requestHandlerInfo.getConfig());
+ requestHandlers.add(requestHandler);
+ } catch (Exception e) {
+ LOG.error("Error initializing request Handler:{}", requestHandlerInfo, e);
+ }
+ }
+ }
+
+ public RequestHandler select(QueryRequest request, RequestAnalyzer analyzer) {
+
+ // check if each of the requestHandler can handle the request and return the cost of serving
+ // that query
+ double minCost = Double.MAX_VALUE;
+ RequestHandler selectedHandler = null;
+ Set<String> referencedColumns = analyzer.getReferencedColumns();
+ Set<String> referencedSources = new HashSet<>(request.getSourceList());
+ for (RequestHandler requestHandler : requestHandlers) {
+ QueryCost queryCost = requestHandler.canHandle(request, referencedSources, referencedColumns);
+ double cost = queryCost.getCost();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Request handler: {}, query cost: {}", requestHandler.getName(), cost);
+ }
+ if (cost >= 0 && cost < minCost) {
+ minCost = cost;
+ selectedHandler = requestHandler;
+ }
+ }
+
+ if (selectedHandler != null) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Selected requestHandler: {} for the query: {}; referencedColumns: {}, cost: {}",
+ selectedHandler.getName(),
+ request,
+ referencedColumns,
+ minCost);
+ }
+ } else {
+ LOG.error(
+ "No requestHandler for the query: {}; referencedColumns: {}, cost: {}",
+ request,
+ referencedColumns,
+ minCost);
+ }
+ return selectedHandler;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/ResultSetChunkCollector.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/ResultSetChunkCollector.java
new file mode 100644
index 00000000..7d1f282d
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/ResultSetChunkCollector.java
@@ -0,0 +1,65 @@
+package org.hypertrace.core.query.service;
+
+import io.grpc.stub.StreamObserver;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import org.hypertrace.core.query.service.api.ResultSetChunk;
+import org.hypertrace.core.query.service.api.ResultSetMetadata;
+import org.hypertrace.core.query.service.api.Row;
+
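+/**
+ * Collects result rows and streams them to the gRPC observer in chunks of at most the configured
+ * chunk size; the final chunk is marked with isLastChunk, and errors are reported in the chunk as
+ * well.
+ */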
+public class ResultSetChunkCollector implements QueryResultCollector<Row> {
+
+ private static final int DEFAULT_CHUNK_SIZE = 10000; // 10k rows
+ private StreamObserver<ResultSetChunk> grpcObserver;
+ private int maxChunkSize;
+ private int currentChunkSize;
+ private int chunkId;
+ private ResultSetChunk.Builder currentBuilder;
+
+ public ResultSetChunkCollector(StreamObserver<ResultSetChunk> grpcObserver) {
+ this(grpcObserver, DEFAULT_CHUNK_SIZE);
+ }
+
+ public ResultSetChunkCollector(StreamObserver<ResultSetChunk> grpcObserver, int chunkSize) {
+ this.grpcObserver = grpcObserver;
+ this.maxChunkSize = chunkSize;
+ this.chunkId = 0;
+ this.currentBuilder = ResultSetChunk.newBuilder();
+ currentBuilder.setChunkId(this.chunkId);
+ }
+
+ public void init(ResultSetMetadata metadata) {
+ currentBuilder.setResultSetMetadata(metadata);
+ }
+
+ public void collect(Row row) {
+ currentBuilder.addRow(row);
+ currentChunkSize++;
+ if (currentChunkSize >= maxChunkSize) {
+ ResultSetChunk resultSetChunk = currentBuilder.build();
+ grpcObserver.onNext(resultSetChunk);
+ currentBuilder.clear();
+ chunkId = chunkId + 1;
+ currentChunkSize = 0;
+ currentBuilder.setChunkId(chunkId);
+ }
+ }
+
+ public void error(Throwable t) {
+ currentBuilder.setIsLastChunk(true);
+ currentBuilder.setHasError(true);
+ StringWriter sw = new StringWriter();
+ PrintWriter pw = new PrintWriter(sw);
+ t.printStackTrace(pw);
+ currentBuilder.setErrorMessage(sw.toString());
+ grpcObserver.onNext(currentBuilder.build());
+ }
+
+ public void finish() {
+ // NOTE: Always send one final ResultSetChunk with isLastChunk = true
+ currentBuilder.setIsLastChunk(true);
+ ResultSetChunk resultSetChunk = currentBuilder.build();
+ grpcObserver.onNext(resultSetChunk);
+ grpcObserver.onCompleted();
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/AdhocPinotQuery.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/AdhocPinotQuery.java
new file mode 100644
index 00000000..076b578f
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/AdhocPinotQuery.java
@@ -0,0 +1,52 @@
+package org.hypertrace.core.query.service.pinot;
+
+import java.util.Map;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.apache.pinot.client.ResultSetGroup;
+
+/*
+ * AdhocPinotQuery can take any Pinot query and returns the ResultSetGroup, which is the raw Pinot
+ * response.
+ */
+@NotThreadSafe
+public class AdhocPinotQuery extends PinotQuery {
+
+ private String query;
+
+ public AdhocPinotQuery(String name, PinotClientFactory.PinotClient pinotClient) {
+ super(name, pinotClient);
+ }
+
+ @Override
+ public String getQuery(Map args) {
+ return this.query;
+ }
+
+ public void setQuery(String query) {
+ this.query = query;
+ }
+
+ @Override
+ ResultSetGroup convertQueryResults(ResultSetGroup queryResults) {
+ return queryResults;
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = super.hashCode();
+ hash = 31 * hash + (query == null ? 0 : query.hashCode());
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (super.equals(o)) {
+ if (this.getClass() != o.getClass()) {
+ return false;
+ }
+ AdhocPinotQuery apq = (AdhocPinotQuery) o;
+ return (this.query.equals(apq.query));
+ }
+ return false;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java
new file mode 100644
index 00000000..42f0dea8
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/DefaultResultSetTypePredicateProvider.java
@@ -0,0 +1,15 @@
+package org.hypertrace.core.query.service.pinot;
+
+import org.apache.pinot.client.ResultSet;
+
+public class DefaultResultSetTypePredicateProvider implements ResultSetTypePredicateProvider {
+ @Override
+ public boolean isSelectionResultSetType(ResultSet resultSet) {
+ return resultSet.getClass().getName().contains("SelectionResultSet");
+ }
+
+ @Override
+ public boolean isResultTableResultSetType(ResultSet resultSet) {
+ return resultSet.getClass().getName().contains("ResultTableResultSet");
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/Params.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/Params.java
new file mode 100644
index 00000000..02396924
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/Params.java
@@ -0,0 +1,102 @@
+package org.hypertrace.core.query.service.pinot;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Holds the params that need to be set in the PreparedStatement for constructing the final PQL
+ * query
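+ *
+ * <p>For example (illustrative values), {@code
+ * Params.newBuilder().addStringParam("tenant-1").addLongParam(100L).build()} registers a String
+ * param at index 0 and a long param at index 1.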
+ */
+public class Params {
+
+ // Map of index to the corresponding param value
+ private Map<Integer, Integer> integerParams;
+ private Map<Integer, Long> longParams;
+ private Map<Integer, String> stringParams;
+ private Map<Integer, Float> floatParams;
+ private Map<Integer, Double> doubleParams;
+
+ private Params(
+ Map<Integer, Integer> integerParams,
+ Map<Integer, Long> longParams,
+ Map<Integer, String> stringParams,
+ Map<Integer, Float> floatParams,
+ Map<Integer, Double> doubleParams) {
+ this.integerParams = integerParams;
+ this.longParams = longParams;
+ this.stringParams = stringParams;
+ this.floatParams = floatParams;
+ this.doubleParams = doubleParams;
+ }
+
+ public Map<Integer, Integer> getIntegerParams() {
+ return integerParams;
+ }
+
+ public Map<Integer, Long> getLongParams() {
+ return longParams;
+ }
+
+ public Map<Integer, String> getStringParams() {
+ return stringParams;
+ }
+
+ public Map<Integer, Float> getFloatParams() {
+ return floatParams;
+ }
+
+ public Map<Integer, Double> getDoubleParams() {
+ return doubleParams;
+ }
+
+ public static Builder newBuilder() {
+ return new Builder();
+ }
+
+ public static class Builder {
+ private int nextIndex;
+ private Map<Integer, Integer> integerParams;
+ private Map<Integer, Long> longParams;
+ private Map<Integer, String> stringParams;
+ private Map<Integer, Float> floatParams;
+ private Map<Integer, Double> doubleParams;
+
+ private Builder() {
+ nextIndex = 0;
+ integerParams = new HashMap<>();
+ longParams = new HashMap<>();
+ stringParams = new HashMap<>();
+ floatParams = new HashMap<>();
+ doubleParams = new HashMap<>();
+ }
+
+ public Builder addIntegerParam(int paramValue) {
+ integerParams.put(nextIndex++, paramValue);
+ return this;
+ }
+
+ public Builder addLongParam(long paramValue) {
+ longParams.put(nextIndex++, paramValue);
+ return this;
+ }
+
+ public Builder addStringParam(String paramValue) {
+ stringParams.put(nextIndex++, paramValue);
+ return this;
+ }
+
+ public Builder addFloatParam(float paramValue) {
+ floatParams.put(nextIndex++, paramValue);
+ return this;
+ }
+
+ public Builder addDoubleParam(double paramValue) {
+ doubleParams.put(nextIndex++, paramValue);
+ return this;
+ }
+
+ public Params build() {
+ return new Params(integerParams, longParams, stringParams, floatParams, doubleParams);
+ }
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandler.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandler.java
new file mode 100644
index 00000000..80c87622
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandler.java
@@ -0,0 +1,281 @@
+package org.hypertrace.core.query.service.pinot;
+
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.client.ResultSetGroup;
+import org.hypertrace.core.query.service.QueryContext;
+import org.hypertrace.core.query.service.QueryCost;
+import org.hypertrace.core.query.service.QueryResultCollector;
+import org.hypertrace.core.query.service.RequestAnalyzer;
+import org.hypertrace.core.query.service.RequestHandler;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.Row;
+import org.hypertrace.core.query.service.api.Row.Builder;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.pinot.PinotClientFactory.PinotClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PinotBasedRequestHandler implements RequestHandler {
+
+ private static final Logger LOG = LoggerFactory.getLogger(PinotBasedRequestHandler.class);
+
+ public static final String VIEW_DEFINITION_CONFIG_KEY = "viewDefinition";
+ private static final int SLOW_REQUEST_THRESHOLD_MS = 3000; // Requests slower than 3 seconds are considered too slow
+
+ private String name;
+ private ViewDefinition viewDefinition;
+ private QueryRequestToPinotSQLConverter request2PinotSqlConverter;
+ private final PinotMapConverter pinotMapConverter;
+ // The implementations of ResultSet are package private, so the only way to determine the shape
+ // of the results is a string comparison on the simple class names. To unit test the logic that
+ // parses the Pinot response we need to be able to mock the ResultSet interface, hence this
+ // interface for deciding the handling function based on the ResultSet class name. See usages of
+ // resultSetTypePredicateProvider to see how it is used.
+ private final ResultSetTypePredicateProvider resultSetTypePredicateProvider;
+ private final PinotClientFactory pinotClientFactory;
+
+ public PinotBasedRequestHandler() {
+ this(new DefaultResultSetTypePredicateProvider(), PinotClientFactory.get());
+ }
+
+ PinotBasedRequestHandler(
+ ResultSetTypePredicateProvider resultSetTypePredicateProvider,
+ PinotClientFactory pinotClientFactory) {
+ this.resultSetTypePredicateProvider = resultSetTypePredicateProvider;
+ this.pinotClientFactory = pinotClientFactory;
+ this.pinotMapConverter = new PinotMapConverter();
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public void init(String name, Map<String, Object> config) {
+ this.name = name;
+ // TODO: use typesafe HOCON object
+ this.viewDefinition = (ViewDefinition) config.get(VIEW_DEFINITION_CONFIG_KEY);
+ request2PinotSqlConverter = new QueryRequestToPinotSQLConverter(viewDefinition);
+ }
+
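+ /**
+ * Returns a negative cost when this view cannot serve every referenced column; otherwise a
+ * fixed cost of 0.5 until a request/view based cost model is implemented (see the TODO below).
+ */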
+ @Override
+ public QueryCost canHandle(
+ QueryRequest request, Set<String> referencedSources, Set<String> referencedColumns) {
+ double cost = -1;
+ boolean found = true;
+ for (String referencedColumn : referencedColumns) {
+ if (!viewDefinition.containsColumn(referencedColumn)) {
+ found = false;
+ break;
+ }
+ }
+ // successfully found a view that can handle the request
+ if (found) {
+ // TODO: Come up with a way to compute the cost based on request and view definition
+ // Higher columns --> Higher cost,
+ // Finer the time granularity --> Higher the cost.
+ cost = 0.5;
+ }
+ QueryCost queryCost = new QueryCost();
+ queryCost.setCost(cost);
+ return queryCost;
+ }
+
+ @Override
+ public void handleRequest(
+ QueryContext queryContext,
+ QueryRequest request,
+ QueryResultCollector<Row> collector,
+ RequestAnalyzer requestAnalyzer) {
+ long start = System.currentTimeMillis();
+ validateQueryRequest(queryContext, request);
+ Entry<String, Params> pql =
+ request2PinotSqlConverter.toSQL(queryContext, request, requestAnalyzer.getAllSelections());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Trying to execute PQL: [ {} ] by RequestHandler: [ {} ]", pql, this.getName());
+ }
+ final PinotClient pinotClient = pinotClientFactory.getPinotClient(this.getName());
+ try {
+ final ResultSetGroup resultSetGroup = pinotClient.executeQuery(pql.getKey(), pql.getValue());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Query results: [ {} ]", resultSetGroup.toString());
+ }
+ // Pinot returns map attributes as separate key/value columns, so we track them to merge the data
+ convert(resultSetGroup, collector, requestAnalyzer.getSelectedColumns());
+ long requestTimeMs = System.currentTimeMillis() - start;
+ if (requestTimeMs > SLOW_REQUEST_THRESHOLD_MS) {
+ LOG.warn("Query Execution time: {} millis\nQuery Request: {}", requestTimeMs, request);
+ }
+ } catch (Exception ex) {
+ // Catch this exception to log the Pinot SQL query that caused the issue
+ LOG.error("An error occurred while executing: {}", pql.getKey(), ex);
+ // Rethrow for the caller to return an error.
+ throw ex;
+ }
+ }
+
+ void convert(
+ ResultSetGroup resultSetGroup,
+ QueryResultCollector<Row> collector,
+ LinkedHashSet<String> selectedAttributes) {
+ List<Row.Builder> rowBuilderList = new ArrayList<>();
+ if (resultSetGroup.getResultSetCount() > 0) {
+ ResultSet resultSet = resultSetGroup.getResultSet(0);
+ // Pinot has different Response format for selection and aggregation/group by query.
+ if (resultSetTypePredicateProvider.isSelectionResultSetType(resultSet)) {
+ // map merging is only supported for selections; filtering and group by have their own
+ // syntax in Pinot
+ handleSelection(resultSetGroup, rowBuilderList, selectedAttributes);
+ } else if (resultSetTypePredicateProvider.isResultTableResultSetType(resultSet)) {
+ handleTableFormatResultSet(resultSetGroup, rowBuilderList);
+ } else {
+ handleAggregationAndGroupBy(resultSetGroup, rowBuilderList);
+ }
+ }
+ for (Row.Builder builder : rowBuilderList) {
+ final Row row = builder.build();
+ LOG.debug("collect a row: {}", row);
+ collector.collect(row);
+ }
+ collector.finish();
+ }
+
+ private void handleSelection(
+ ResultSetGroup resultSetGroup,
+ List<Row.Builder> rowBuilderList,
+ LinkedHashSet<String> selectedAttributes) {
+ int resultSetGroupCount = resultSetGroup.getResultSetCount();
+ for (int i = 0; i < resultSetGroupCount; i++) {
+ ResultSet resultSet = resultSetGroup.getResultSet(i);
+ // Find the index in the result's columns for each selected attribute
+ PinotResultAnalyzer resultAnalyzer =
+ PinotResultAnalyzer.create(resultSet, selectedAttributes, viewDefinition);
+
+ // For each row returned from Pinot,
+ // build the row according to the selected attributes from the request
+ for (int rowId = 0; rowId < resultSet.getRowCount(); rowId++) {
+ Builder builder = Row.newBuilder();
+ rowBuilderList.add(builder);
+
+ // for each selected attribute in the request get the data from the
+ // Pinot row result
+ for (String logicalName : selectedAttributes) {
+ // colVal will never be null, but getDataFromRow can throw a runtime exception if it
+ // failed to retrieve the data
+ String colVal = resultAnalyzer.getDataFromRow(rowId, logicalName);
+ builder.addColumn(Value.newBuilder().setString(colVal).build());
+ }
+ }
+ }
+ }
+
+ private void handleAggregationAndGroupBy(
+ ResultSetGroup resultSetGroup, List<Row.Builder> rowBuilderList) {
+ int resultSetGroupCount = resultSetGroup.getResultSetCount();
+ Map<String, Integer> groupKey2RowIdMap = new HashMap<>();
+ for (int i = 0; i < resultSetGroupCount; i++) {
+ ResultSet resultSet = resultSetGroup.getResultSet(i);
+ for (int rowId = 0; rowId < resultSet.getRowCount(); rowId++) {
+ Builder builder;
+ // Build a composite group key by joining the group-by column values with '|'
+ int groupKeyLength = resultSet.getGroupKeyLength();
+ String groupKey;
+ StringBuilder groupKeyBuilder = new StringBuilder();
+ String groupKeyDelim = "";
+ for (int g = 0; g < groupKeyLength; g++) {
+ String colVal = resultSet.getGroupKeyString(rowId, g);
+ groupKeyBuilder.append(groupKeyDelim).append(colVal);
+ groupKeyDelim = "|";
+ }
+ groupKey = groupKeyBuilder.toString();
+ if (!groupKey2RowIdMap.containsKey(groupKey)) {
+ builder = Row.newBuilder();
+ rowBuilderList.add(builder);
+ groupKey2RowIdMap.put(groupKey, rowId);
+ for (int g = 0; g < groupKeyLength; g++) {
+ String colVal = resultSet.getGroupKeyString(rowId, g);
+ // add it only the first time
+ builder.addColumn(Value.newBuilder().setString(colVal).build());
+ groupKeyBuilder.append(colVal).append(groupKeyDelim);
+ groupKeyDelim = "|";
+ }
+ } else {
+ builder = rowBuilderList.get(groupKey2RowIdMap.get(groupKey));
+ }
+ int columnCount = resultSet.getColumnCount();
+ if (columnCount > 0) {
+ for (int c = 0; c < columnCount; c++) {
+ String colVal = resultSet.getString(rowId, c);
+ builder.addColumn(Value.newBuilder().setString(colVal).build());
+ }
+ }
+ }
+ }
+ }
+
+ private void handleTableFormatResultSet(
+ ResultSetGroup resultSetGroup, List<Row.Builder> rowBuilderList) {
+ int resultSetGroupCount = resultSetGroup.getResultSetCount();
+ for (int i = 0; i < resultSetGroupCount; i++) {
+ ResultSet resultSet = resultSetGroup.getResultSet(i);
+ for (int rowIdx = 0; rowIdx < resultSet.getRowCount(); rowIdx++) {
+ Builder builder = Row.newBuilder();
+ rowBuilderList.add(builder);
+
+ for (int colIdx = 0; colIdx < resultSet.getColumnCount(); colIdx++) {
+ if (resultSet.getColumnName(colIdx).endsWith(ViewDefinition.MAP_KEYS_SUFFIX)) {
+ // Read the key and value column values. The columns should be side by side; that's how
+ // the Pinot query is structured
+ String mapKeys = resultSet.getString(rowIdx, colIdx);
+ String mapVals = resultSet.getString(rowIdx, colIdx + 1);
+ try {
+ builder.addColumn(
+ Value.newBuilder().setString(pinotMapConverter.merge(mapKeys, mapVals)).build());
+ } catch (IOException ex) {
+ LOG.error("An error occured while merging mapKeys and mapVals", ex);
+ throw new RuntimeException(
+ "An error occurred while parsing the Pinot Table format response", ex);
+ }
+ // advance colIdx by 1 since we have read 2 columns
+ colIdx++;
+ } else {
+ String val = resultSet.getString(rowIdx, colIdx);
+ builder.addColumn(Value.newBuilder().setString(val).build());
+ }
+ }
+ }
+ }
+ }
+
+ private void validateQueryRequest(QueryContext queryContext, QueryRequest request) {
+ // Validate QueryContext and tenant id presence
+ Preconditions.checkNotNull(queryContext);
+ Preconditions.checkNotNull(queryContext.getTenantId());
+
+ // Validate DISTINCT selections
+ if (request.getDistinctSelections()) {
+ boolean noGroupBy = request.getGroupByCount() == 0;
+ boolean noAggregations = request.getAggregationCount() == 0;
+ Preconditions.checkArgument(
+ noGroupBy && noAggregations,
+ "If distinct selections are requested, there should be no groupBys or aggregations.");
+ }
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotClientFactory.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotClientFactory.java
new file mode 100644
index 00000000..1e1fe613
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotClientFactory.java
@@ -0,0 +1,101 @@
+package org.hypertrace.core.query.service.pinot;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Future;
+import org.apache.pinot.client.Connection;
+import org.apache.pinot.client.ConnectionFactory;
+import org.apache.pinot.client.PreparedStatement;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSetGroup;
+
+/*
+ * Factory to create and cache a PinotClient per Pinot cluster, based on the given connection type
+ * (zookeeper or broker) and path.
+ */
+public class PinotClientFactory {
+
+ private static final org.slf4j.Logger LOG =
+ org.slf4j.LoggerFactory.getLogger(PinotClientFactory.class);
+ // Singleton instance
+ private static final PinotClientFactory INSTANCE = new PinotClientFactory();
+
+ private final ConcurrentHashMap<String, PinotClient> clientMap = new ConcurrentHashMap<>();
+
+ private PinotClientFactory() {}
+
+ // Create a Pinot Client.
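+ // pathType can be "zk"/"zookeeper" (a Zookeeper address) or "broker" (a broker host list); the
+ // client is created once per pinotCluster name using double-checked locking and then reused.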
+ public static PinotClient createPinotClient(String pinotCluster, String pathType, String path) {
+ if (!get().containsClient(pinotCluster)) {
+ synchronized (get()) {
+ if (!get().containsClient(pinotCluster)) {
+ get().addPinotClient(pinotCluster, new PinotClient(pathType, path));
+ }
+ }
+ }
+ return get().getPinotClient(pinotCluster);
+ }
+
+ public static PinotClientFactory get() {
+ return INSTANCE;
+ }
+
+ private void addPinotClient(String cluster, PinotClient client) {
+ this.clientMap.put(cluster, client);
+ }
+
+ public boolean containsClient(String clusterName) {
+ return this.clientMap.containsKey(clusterName);
+ }
+
+ public PinotClient getPinotClient(String clusterName) {
+ return this.clientMap.get(clusterName);
+ }
+
+ public static class PinotClient {
+ private static final String SQL_FORMAT = "sql";
+
+ private final Connection connection;
+
+ @VisibleForTesting
+ public PinotClient(Connection connection) {
+ this.connection = connection;
+ }
+
+ private PinotClient(String pathType, String path) {
+ switch (pathType.toLowerCase()) {
+ case "zk":
+ case "zookeeper":
+ LOG.info("Trying to create a Pinot client connected to Zookeeper: {}", path);
+ this.connection = ConnectionFactory.fromZookeeper(path);
+ break;
+ case "broker":
+ LOG.info("Trying to create a Pinot client with default brokerlist: {}", path);
+ this.connection = ConnectionFactory.fromHostList(path);
+ break;
+ default:
+ throw new RuntimeException("Unsupported Pinot Client scheme: " + pathType);
+ }
+ }
+
+ public ResultSetGroup executeQuery(String statement, Params params) {
+ PreparedStatement preparedStatement = buildPreparedStatement(statement, params);
+ return preparedStatement.execute();
+ }
+
+ public Future<ResultSetGroup> executeQueryAsync(String statement, Params params) {
+ PreparedStatement preparedStatement = buildPreparedStatement(statement, params);
+ return preparedStatement.executeAsync();
+ }
+
+ private PreparedStatement buildPreparedStatement(String statement, Params params) {
+ Request request = new Request(SQL_FORMAT, statement);
+ PreparedStatement preparedStatement = connection.prepareStatement(request);
+ params.getStringParams().forEach(preparedStatement::setString);
+ params.getIntegerParams().forEach(preparedStatement::setInt);
+ params.getLongParams().forEach(preparedStatement::setLong);
+ params.getDoubleParams().forEach(preparedStatement::setDouble);
+ params.getFloatParams().forEach(preparedStatement::setFloat);
+ return preparedStatement;
+ }
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotColumnSpec.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotColumnSpec.java
new file mode 100644
index 00000000..c3f06277
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotColumnSpec.java
@@ -0,0 +1,31 @@
+package org.hypertrace.core.query.service.pinot;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.hypertrace.core.query.service.api.ValueType;
+
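+/** Holds the physical Pinot column name(s) and the value type backing a single logical column. */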
+public class PinotColumnSpec {
+
+ private final List<String> columnNames;
+ private ValueType type;
+
+ public PinotColumnSpec() {
+ columnNames = new ArrayList<>();
+ }
+
+ public List<String> getColumnNames() {
+ return columnNames;
+ }
+
+ public void addColumnName(String columnName) {
+ columnNames.add(columnName);
+ }
+
+ public ValueType getType() {
+ return type;
+ }
+
+ public void setType(ValueType type) {
+ this.type = type;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotMapConverter.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotMapConverter.java
new file mode 100644
index 00000000..49662d82
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotMapConverter.java
@@ -0,0 +1,91 @@
+package org.hypertrace.core.query.service.pinot;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PinotMapConverter {
+ // This is how empty list is represented in Pinot
+ private static final String PINOT_EMPTY_LIST = "[\"\"]";
+ private static final Logger LOG = LoggerFactory.getLogger(PinotMapConverter.class);
+ private static final TypeReference<List<String>> listOfString = new TypeReference<>() {};
+
+ private final ObjectMapper objectMapper;
+
+ public PinotMapConverter() {
+ this.objectMapper = new ObjectMapper();
+ }
+
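+ /**
+ * Merges the JSON array of keys and the JSON array of values that Pinot returns for a map column
+ * into a single JSON object string. The keys drive the result: extra values are dropped and
+ * missing values are stored as null.
+ */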
+ String merge(String keyData, String valueData) throws IOException {
+ Map<String, String> map = new HashMap<>();
+ // neither side should ever be null
+ if (keyData == null || valueData == null) {
+ // throw IOException so that it can be caught by the caller to provide additional
+ // context
+ throw new IOException("Key Data or Value Data of this map is null.");
+ }
+
+ List<String> keys;
+ if (PINOT_EMPTY_LIST.equals(keyData)) {
+ keys = new ArrayList<>();
+ } else {
+ try {
+ keys = objectMapper.readValue(keyData, listOfString);
+ } catch (IOException e) {
+ LOG.error(
+ "Failed to deserialize map's key to list of string object. Raw Json String: {}",
+ keyData);
+ throw e;
+ }
+ }
+
+ List<String> values;
+ if (PINOT_EMPTY_LIST.equals(valueData)) {
+ values = new ArrayList<>();
+ } else {
+ try {
+ values = objectMapper.readValue(valueData, listOfString);
+ } catch (IOException e) {
+ LOG.error(
+ "Failed to deserialize map's value to list of string object. Raw Json String {}",
+ valueData);
+ throw e;
+ }
+ }
+
+ if (keys.size() != values.size()) {
+ LOG.warn(
+ "Keys and Values data size does not match. Data will be return based on the kyes"
+ + "Keys Size: {}, Values Size: {}",
+ keys.size(),
+ values.size());
+ // todo: make this debug once in production
+ LOG.info("Keys: {}, \n Values:{}", keys, values);
+ }
+
+ // If the size does not match, the key is driving the map data. Any excessive values
+ // will be dropped
+ for (int idx = 0; idx < keys.size(); idx++) {
+ if (idx < values.size()) {
+ map.put(keys.get(idx), values.get(idx));
+ } else {
+ // to handle unbalanced size
+ map.put(keys.get(idx), null);
+ }
+ }
+
+ try {
+ return objectMapper.writeValueAsString(map);
+ } catch (JsonProcessingException e) {
+ LOG.error("Unable to write the merged map as json. Raw Data: {}", map);
+ throw e;
+ }
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotQuery.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotQuery.java
new file mode 100644
index 00000000..6b5cdda6
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotQuery.java
@@ -0,0 +1,61 @@
+package org.hypertrace.core.query.service.pinot;
+
+import java.util.Collections;
+import java.util.Map;
+import org.apache.pinot.client.ResultSetGroup;
+import org.hypertrace.core.query.service.pinot.PinotClientFactory.PinotClient;
+
+/*
+ * PinotQuery provides basic interface for getting query response from Pinot.
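+ * Subclasses supply the query string via getQuery() and convert the ResultSetGroup into the
+ * desired result type via convertQueryResults().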
+ */
+public abstract class PinotQuery<T> {
+
+ private final String name;
+ private final PinotClient pinotClient;
+
+ public PinotQuery(String name, PinotClient pinotClient) {
+ this.name = name;
+ this.pinotClient = pinotClient;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ abstract String getQuery(Map<String, Object> args);
+
+ abstract T convertQueryResults(ResultSetGroup queryResults);
+
+ public T execute() {
+ return execute(Collections.emptyMap());
+ }
+
+ public T execute(Map<String, Object> args) {
+ final ResultSetGroup queryResults =
+ this.pinotClient.executeQuery(getQuery(args), Params.newBuilder().build());
+ return convertQueryResults(queryResults);
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 7;
+ hash = 31 * hash + (name == null ? 0 : name.hashCode());
+ hash = 31 * hash + (pinotClient == null ? 0 : pinotClient.hashCode());
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null) {
+ return false;
+ }
+ if (this.getClass() != o.getClass()) {
+ return false;
+ }
+ PinotQuery<?> pq = (PinotQuery<?>) o;
+ return (this.name.equals(pq.name) && this.pinotClient == pq.pinotClient);
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java
new file mode 100644
index 00000000..62c91131
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzer.java
@@ -0,0 +1,152 @@
+package org.hypertrace.core.query.service.pinot;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.RateLimiter;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import javax.annotation.Nonnull;
+import org.apache.pinot.client.ResultSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Discovers the map attribute indexes from the Pinot ResultSet */
+class PinotResultAnalyzer {
+ private static final Logger LOG = LoggerFactory.getLogger(PinotResultAnalyzer.class);
+
+ /* Stores the Map Attributes logical name to Physical Names */
+ private final Map<String, Integer> mapLogicalNameToKeyIndex;
+ private final Map<String, Integer> mapLogicalNameToValueIndex;
+
+ /* Stores the Non-Map Attributes logical name to Physical Name index */
+ private final Map<String, Integer> logicalNameToPhysicalNameIndex;
+ private final ResultSet resultSet;
+ private final ViewDefinition viewDefinition;
+ private final Map<String, RateLimiter> attributeLogRateLimitter;
+ private final PinotMapConverter pinotMapConverter;
+
+ PinotResultAnalyzer(
+ ResultSet resultSet,
+ LinkedHashSet<String> selectedAttributes,
+ ViewDefinition viewDefinition,
+ Map<String, Integer> mapLogicalNameToKeyIndex,
+ Map<String, Integer> mapLogicalNameToValueIndex,
+ Map<String, Integer> logicalNameToPhysicalNameIndex) {
+ this.mapLogicalNameToKeyIndex = mapLogicalNameToKeyIndex;
+ this.mapLogicalNameToValueIndex = mapLogicalNameToValueIndex;
+ this.logicalNameToPhysicalNameIndex = logicalNameToPhysicalNameIndex;
+ this.resultSet = resultSet;
+ this.viewDefinition = viewDefinition;
+ this.attributeLogRateLimitter = new HashMap<>();
+ selectedAttributes.forEach(e -> attributeLogRateLimitter.put(e, RateLimiter.create(0.5)));
+ this.pinotMapConverter = new PinotMapConverter();
+ }
+
+ /** For each selected attribute, build the map of logical name to result index. */
+ static PinotResultAnalyzer create(
+ ResultSet resultSet,
+ LinkedHashSet<String> selectedAttributes,
+ ViewDefinition viewDefinition) {
+ Map<String, Integer> mapLogicalNameToKeyIndex = new HashMap<>();
+ Map<String, Integer> mapLogicalNameToValueIndex = new HashMap<>();
+ Map<String, Integer> logicalNameToPhysicalNameIndex = new HashMap<>();
+
+ for (String logicalName : selectedAttributes) {
+ if (viewDefinition.isMap(logicalName)) {
+ String keyPhysicalName = viewDefinition.getKeyColumnNameForMap(logicalName);
+ String valuePhysicalName = viewDefinition.getValueColumnNameForMap(logicalName);
+ for (int colIndex = 0; colIndex < resultSet.getColumnCount(); colIndex++) {
+ String physName = resultSet.getColumnName(colIndex);
+ if (physName.equalsIgnoreCase(keyPhysicalName)) {
+ mapLogicalNameToKeyIndex.put(logicalName, colIndex);
+ } else if (physName.equalsIgnoreCase(valuePhysicalName)) {
+ mapLogicalNameToValueIndex.put(logicalName, colIndex);
+ }
+ }
+ } else {
+ List<String> names = viewDefinition.getPhysicalColumnNames(logicalName);
+ Preconditions.checkArgument(names.size() == 1);
+ for (int colIndex = 0; colIndex < resultSet.getColumnCount(); colIndex++) {
+ String physName = resultSet.getColumnName(colIndex);
+ if (physName.equalsIgnoreCase(names.get(0))) {
+ logicalNameToPhysicalNameIndex.put(logicalName, colIndex);
+ break;
+ }
+ }
+ }
+ }
+ LOG.info("Map LogicalName to Key Index: {} ", mapLogicalNameToKeyIndex);
+ LOG.info("Map LogicalName to Value Index: {}", mapLogicalNameToValueIndex);
+ LOG.info("Attributes to Index: {}", logicalNameToPhysicalNameIndex);
+ return new PinotResultAnalyzer(
+ resultSet,
+ selectedAttributes,
+ viewDefinition,
+ mapLogicalNameToKeyIndex,
+ mapLogicalNameToValueIndex,
+ logicalNameToPhysicalNameIndex);
+ }
+
+ @VisibleForTesting
+ Integer getMapKeyIndex(String logicalName) {
+ return mapLogicalNameToKeyIndex.get(logicalName);
+ }
+
+ @VisibleForTesting
+ Integer getMapValueIndex(String logicalName) {
+ return mapLogicalNameToValueIndex.get(logicalName);
+ }
+
+ @VisibleForTesting
+ Integer getPhysicalColumnIndex(String logicalName) {
+ return logicalNameToPhysicalNameIndex.get(logicalName);
+ }
+
+ /**
+ * Gets the data from the ResultSet row; the returned value is never null.
+ *
+ * @throws IllegalStateException if an index needed for merging is missing or there's an issue
+ * with the data format in Pinot
+ * @return the (possibly merged map) column data; never null
+ */
+ @Nonnull
+ String getDataFromRow(int rowIndex, String logicalName) {
+
+ String result;
+ if (viewDefinition.isMap(logicalName)) {
+ Integer keyIndex = getMapKeyIndex(logicalName);
+ if (keyIndex == null) {
+ LOG.info("Map LogicalName to Key Index: {} ", mapLogicalNameToKeyIndex);
+ LOG.info("Attributes to Index: {}", logicalNameToPhysicalNameIndex);
+ throw new IllegalStateException(
+ "Unable to find the key index to attribute: " + logicalName);
+ }
+ String keyData = resultSet.getString(rowIndex, keyIndex);
+
+ String valueData = "";
+ Integer valueIndex = getMapValueIndex(logicalName);
+ if (valueIndex == null) {
+ if (attributeLogRateLimitter.get(logicalName).tryAcquire()) {
+ LOG.error("Unable to find the map value column index for Attribute: {}.", logicalName);
+ LOG.info("Map LogicalName to Value Index: {} ", mapLogicalNameToValueIndex);
+ LOG.info("Attributes to Index: {}", logicalNameToPhysicalNameIndex);
+ }
+ } else {
+ valueData = resultSet.getString(rowIndex, valueIndex);
+ }
+ try {
+ result = pinotMapConverter.merge(keyData, valueData);
+ } catch (IOException e) {
+ throw new IllegalStateException(
+ "Unable to merge the map data for attribute " + logicalName, e);
+ }
+ } else {
+ Integer colIndex = getPhysicalColumnIndex(logicalName);
+ result = resultSet.getString(rowIndex, colIndex);
+ }
+ return result;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotUtils.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotUtils.java
new file mode 100644
index 00000000..8a4497af
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/PinotUtils.java
@@ -0,0 +1,10 @@
+package org.hypertrace.core.query.service.pinot;
+
+public class PinotUtils {
+
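+ /**
+ * Joins the Zookeeper base path and the Pinot cluster name, e.g. getZkPath("/pinot", "cluster0")
+ * returns "/pinot/cluster0".
+ */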
+ public static String getZkPath(String zkBasePath, String pinotClusterName) {
+ return zkBasePath.endsWith("/")
+ ? zkBasePath + pinotClusterName
+ : zkBasePath + "/" + pinotClusterName;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java
new file mode 100644
index 00000000..6eeb7e9b
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverter.java
@@ -0,0 +1,365 @@
+package org.hypertrace.core.query.service.pinot;
+
+import static org.hypertrace.core.query.service.api.Expression.ValueCase.COLUMNIDENTIFIER;
+import static org.hypertrace.core.query.service.api.Expression.ValueCase.LITERAL;
+
+import com.google.common.base.Joiner;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import org.hypertrace.core.query.service.QueryContext;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.OrderByExpression;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.SortOrder;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.api.ValueType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Converts {@link QueryRequest} to Pinot SQL query */
+class QueryRequestToPinotSQLConverter {
+ private static final Logger LOG = LoggerFactory.getLogger(QueryRequestToPinotSQLConverter.class);
+
+ private static final String QUESTION_MARK = "?";
+ private static final String REGEX_OPERATOR = "REGEXP_LIKE";
+ private static final String MAP_VALUE = "mapValue";
+ private static final int MAP_KEY_INDEX = 0;
+ private static final int MAP_VALUE_INDEX = 1;
+
+ private final ViewDefinition viewDefinition;
+ private final Joiner joiner = Joiner.on(", ").skipNulls();
+
+ QueryRequestToPinotSQLConverter(ViewDefinition viewDefinition) {
+ this.viewDefinition = viewDefinition;
+ }
+
+ Entry<String, Params> toSQL(
+ QueryContext queryContext, QueryRequest request, LinkedHashSet<Expression> allSelections) {
+ Params.Builder paramsBuilder = Params.newBuilder();
+ StringBuilder pqlBuilder = new StringBuilder("Select ");
+ String delim = "";
+
+ // Set the DISTINCT keyword if the request has set distinctSelections.
+ if (request.getDistinctSelections()) {
+ pqlBuilder.append("DISTINCT ");
+ }
+
+ // allSelections contains all the expressions in the QueryRequest that we want selections on:
+ // group bys, selections and aggregations, in that order. See RequestAnalyzer#analyze() to see
+ // how it is built.
+ for (Expression expr : allSelections) {
+ pqlBuilder.append(delim);
+ pqlBuilder.append(convertExpression2String(expr, paramsBuilder));
+ delim = ", ";
+ }
+
+ pqlBuilder.append(" FROM ").append(viewDefinition.getViewName());
+
+ // Add the tenantId filter
+ pqlBuilder.append(" WHERE ").append(viewDefinition.getTenantIdColumn()).append(" = ?");
+ paramsBuilder.addStringParam(queryContext.getTenantId());
+
+ if (request.hasFilter()) {
+ pqlBuilder.append(" AND ");
+ String filterClause = convertFilter2String(request.getFilter(), paramsBuilder);
+ pqlBuilder.append(filterClause);
+ }
+
+ if (request.getGroupByCount() > 0) {
+ pqlBuilder.append(" GROUP BY ");
+ delim = "";
+ for (Expression groupByExpression : request.getGroupByList()) {
+ pqlBuilder.append(delim);
+ pqlBuilder.append(convertExpression2String(groupByExpression, paramsBuilder));
+ delim = ", ";
+ }
+ }
+ if (!request.getOrderByList().isEmpty()) {
+ pqlBuilder.append(" ORDER BY ");
+ delim = "";
+ for (OrderByExpression orderByExpression : request.getOrderByList()) {
+ pqlBuilder.append(delim);
+ String orderBy = convertExpression2String(orderByExpression.getExpression(), paramsBuilder);
+ pqlBuilder.append(orderBy);
+ if (SortOrder.DESC.equals(orderByExpression.getOrder())) {
+ pqlBuilder.append(" desc ");
+ }
+ delim = ", ";
+ }
+ }
+ if (request.getLimit() > 0) {
+ if (request.getOffset() > 0) {
+ pqlBuilder
+ .append(" limit ")
+ .append(request.getOffset())
+ .append(", ")
+ .append(request.getLimit());
+ } else {
+ pqlBuilder.append(" limit ").append(request.getLimit());
+ }
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Converted QueryRequest to Pinot SQL: {}", pqlBuilder);
+ }
+ return new SimpleEntry<>(pqlBuilder.toString(), paramsBuilder.build());
+ }
+
+ private String convertFilter2String(Filter filter, Params.Builder paramsBuilder) {
+ StringBuilder builder = new StringBuilder();
+ String operator = convertOperator2String(filter.getOperator());
+ if (filter.getChildFilterCount() > 0) {
+ String delim = "";
+ builder.append("( ");
+ for (Filter childFilter : filter.getChildFilterList()) {
+ builder.append(delim);
+ builder.append(convertFilter2String(childFilter, paramsBuilder));
+ builder.append(" ");
+ delim = operator + " ";
+ }
+ builder.append(")");
+ } else {
+ switch (filter.getOperator()) {
+ case LIKE:
+ // The like operation in PQL looks like `regexp_like(lhs, rhs)`
+ builder.append(operator);
+ builder.append("(");
+ builder.append(convertExpression2String(filter.getLhs(), paramsBuilder));
+ builder.append(",");
+ builder.append(convertExpression2String(filter.getRhs(), paramsBuilder));
+ builder.append(")");
+ break;
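+ // CONTAINS_KEY only checks the map's key column, i.e. <column>__KEYS = ?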
+ case CONTAINS_KEY:
+ LiteralConstant[] kvp = convertExpressionToMapLiterals(filter.getRhs());
+ builder.append(convertExpressionToMapKeyColumn(filter.getLhs()));
+ builder.append(" = ");
+ builder.append(convertLiteralToString(kvp[MAP_KEY_INDEX], paramsBuilder));
+ break;
+ case CONTAINS_KEYVALUE:
+ kvp = convertExpressionToMapLiterals(filter.getRhs());
+ String keyCol = convertExpressionToMapKeyColumn(filter.getLhs());
+ String valCol = convertExpressionToMapValueColumn(filter.getLhs());
+ builder.append(keyCol);
+ builder.append(" = ");
+ builder.append(convertLiteralToString(kvp[MAP_KEY_INDEX], paramsBuilder));
+ builder.append(" AND ");
+ builder.append(valCol);
+ builder.append(" = ");
+ builder.append(convertLiteralToString(kvp[MAP_VALUE_INDEX], paramsBuilder));
+ builder.append(" AND ");
+ builder.append(MAP_VALUE);
+ builder.append("(");
+ builder.append(keyCol);
+ builder.append(",");
+ builder.append(convertLiteralToString(kvp[MAP_KEY_INDEX], paramsBuilder));
+ builder.append(",");
+ builder.append(valCol);
+ builder.append(") = ");
+ builder.append(convertLiteralToString(kvp[MAP_VALUE_INDEX], paramsBuilder));
+ break;
+ default:
+ builder.append(convertExpression2String(filter.getLhs(), paramsBuilder));
+ builder.append(" ");
+ builder.append(operator);
+ builder.append(" ");
+ builder.append(convertExpression2String(filter.getRhs(), paramsBuilder));
+ }
+ }
+ return builder.toString();
+ }
+
+ private String convertOperator2String(Operator operator) {
+ switch (operator) {
+ case AND:
+ return "AND";
+ case OR:
+ return "OR";
+ case NOT:
+ return "NOT";
+ case EQ:
+ return "=";
+ case NEQ:
+ return "!=";
+ case IN:
+ return "IN";
+ case NOT_IN:
+ return "NOT IN";
+ case GT:
+ return ">";
+ case LT:
+ return "<";
+ case GE:
+ return ">=";
+ case LE:
+ return "<=";
+ case LIKE:
+ return REGEX_OPERATOR;
+ case CONTAINS_KEY:
+ case CONTAINS_KEYVALUE:
+ return MAP_VALUE;
+ case RANGE:
+ throw new UnsupportedOperationException("RANGE NOT supported use >= and <=");
+ case UNRECOGNIZED:
+ default:
+ throw new UnsupportedOperationException("Unknown operator:" + operator);
+ }
+ }
+
+ private String convertExpression2String(Expression expression, Params.Builder paramsBuilder) {
+ switch (expression.getValueCase()) {
+ case COLUMNIDENTIFIER:
+ String logicalColumnName = expression.getColumnIdentifier().getColumnName();
+ // this takes care of the Map Type where it's split into 2 columns
+ List<String> columnNames = viewDefinition.getPhysicalColumnNames(logicalColumnName);
+ return joiner.join(columnNames);
+ case LITERAL:
+ return convertLiteralToString(expression.getLiteral(), paramsBuilder);
+ case FUNCTION:
+ Function function = expression.getFunction();
+ String functionName = function.getFunctionName();
+ // For COUNT(column_name), Pinot's SQL format converts it to COUNT(*), and ORDER BY only
+ // works with COUNT(*)
+ if (functionName.equalsIgnoreCase("COUNT")) {
+ return functionName + "(*)";
+ }
+ List<Expression> argumentsList = function.getArgumentsList();
+ String[] args = new String[argumentsList.size()];
+ for (int i = 0; i < argumentsList.size(); i++) {
+ Expression expr = argumentsList.get(i);
+ args[i] = convertExpression2String(expr, paramsBuilder);
+ }
+ return functionName + "(" + joiner.join(args) + ")";
+ case ORDERBY:
+ OrderByExpression orderBy = expression.getOrderBy();
+ return convertExpression2String(orderBy.getExpression(), paramsBuilder);
+ case VALUE_NOT_SET:
+ break;
+ }
+ return "";
+ }
+
+ private String convertExpressionToMapKeyColumn(Expression expression) {
+ if (expression.getValueCase() == COLUMNIDENTIFIER) {
+ String logicalColumnName = expression.getColumnIdentifier().getColumnName();
+ String col = viewDefinition.getKeyColumnNameForMap(logicalColumnName);
+ if (col != null && col.length() > 0) {
+ return col;
+ }
+ }
+ throw new IllegalArgumentException(
+ "operator CONTAINS_KEY/KEYVALUE supports multi value column only");
+ }
+
+ private String convertExpressionToMapValueColumn(Expression expression) {
+ if (expression.getValueCase() == COLUMNIDENTIFIER) {
+ String logicalColumnName = expression.getColumnIdentifier().getColumnName();
+ String col = viewDefinition.getValueColumnNameForMap(logicalColumnName);
+ if (col != null && col.length() > 0) {
+ return col;
+ }
+ }
+ throw new IllegalArgumentException(
+ "operator CONTAINS_KEY/KEYVALUE supports multi value column only");
+ }
+
+ private LiteralConstant[] convertExpressionToMapLiterals(Expression expression) {
+ LiteralConstant[] literals = new LiteralConstant[2];
+ if (expression.getValueCase() == LITERAL) {
+ LiteralConstant value = expression.getLiteral();
+ if (value.getValue().getValueType() == ValueType.STRING_ARRAY) {
+ for (int i = 0; i < 2 && i < value.getValue().getStringArrayCount(); i++) {
+ literals[i] =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setString(value.getValue().getStringArray(i)).build())
+ .build();
+ }
+ } else {
+ throw new IllegalArgumentException(
+ "operator CONTAINS_KEYVALUE supports "
+ + ValueType.STRING_ARRAY.name()
+ + " value type only");
+ }
+ }
+
+ for (int i = 0; i < literals.length; i++) {
+ if (literals[i] == null) {
+ literals[i] =
+ LiteralConstant.newBuilder().setValue(Value.newBuilder().setString("").build()).build();
+ }
+ }
+
+ return literals;
+ }
+
+ /** TODO: Handle all types */
+ private String convertLiteralToString(LiteralConstant literal, Params.Builder paramsBuilder) {
+ Value value = literal.getValue();
+ String ret = null;
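+ // Value types without explicit handling below (e.g. the non-string array types and BYTES) fall
+ // through and leave ret as null; see the TODO above.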
+ switch (value.getValueType()) {
+ case STRING_ARRAY:
+ StringBuilder builder = new StringBuilder("(");
+ String delim = "";
+ for (String item : value.getStringArrayList()) {
+ builder.append(delim);
+ builder.append(QUESTION_MARK);
+ paramsBuilder.addStringParam(item);
+ delim = ", ";
+ }
+ builder.append(")");
+ ret = builder.toString();
+ break;
+ case LONG_ARRAY:
+ break;
+ case INT_ARRAY:
+ break;
+ case FLOAT_ARRAY:
+ break;
+ case DOUBLE_ARRAY:
+ break;
+ case BYTES_ARRAY:
+ break;
+ case BOOLEAN_ARRAY:
+ break;
+ case STRING:
+ ret = QUESTION_MARK;
+ paramsBuilder.addStringParam(value.getString());
+ break;
+ case LONG:
+ ret = QUESTION_MARK;
+ paramsBuilder.addLongParam(value.getLong());
+ break;
+ case INT:
+ ret = QUESTION_MARK;
+ paramsBuilder.addIntegerParam(value.getInt());
+ break;
+ case FLOAT:
+ ret = QUESTION_MARK;
+ paramsBuilder.addFloatParam(value.getFloat());
+ break;
+ case DOUBLE:
+ ret = QUESTION_MARK;
+ paramsBuilder.addDoubleParam(value.getDouble());
+ break;
+ case BYTES:
+ break;
+ case BOOL:
+ ret = QUESTION_MARK;
+ paramsBuilder.addStringParam(String.valueOf(value.getBoolean()));
+ break;
+ case TIMESTAMP:
+ ret = QUESTION_MARK;
+ paramsBuilder.addLongParam(value.getTimestamp());
+ break;
+ case UNRECOGNIZED:
+ break;
+ }
+ return ret;
+ }
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java
new file mode 100644
index 00000000..5a8a4b21
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ResultSetTypePredicateProvider.java
@@ -0,0 +1,21 @@
+package org.hypertrace.core.query.service.pinot;
+
+import org.apache.pinot.client.ResultSet;
+
+/**
+ * This interface is used to determine which handler will parse the Pinot ResultSet in
+ * PinotBasedRequestHandler#convert(). We define it to make it easy to unit test the parsing logic
+ * since:
+ *
+ * <ul>
+ *   <li>The implementations of ResultSet are package private and there's no way to determine the
+ *       concrete type of the ResultSet object other than using the class name. See the
+ *       DefaultResultSetTypePredicateProvider class.
+ *   <li>The ResultSet interface itself is implemented non uniformly by its implementations. The
+ *       defined methods in the interface do not return consistent data across the implementations
+ *       and the format of the implementations is different.
+ *   <li>However, since it seems like for the "sql" format ResultTableResultSet is returned for all
+ *       Pinot query types, we might be able to get rid of this in the future and have a single
+ *       flow to parse the Pinot response.
+ * </ul>
+ */
+public interface ResultSetTypePredicateProvider {
+ boolean isSelectionResultSetType(ResultSet resultSet);
+
+ boolean isResultTableResultSetType(ResultSet resultSet);
+}
diff --git a/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java
new file mode 100644
index 00000000..91b74a7c
--- /dev/null
+++ b/query-service-impl/src/main/java/org/hypertrace/core/query/service/pinot/ViewDefinition.java
@@ -0,0 +1,93 @@
+package org.hypertrace.core.query.service.pinot;
+
+import com.google.common.base.Preconditions;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.hypertrace.core.query.service.api.ValueType;
+
+public class ViewDefinition {
+ static final String MAP_KEYS_SUFFIX = "__KEYS";
+ static final String MAP_VALUES_SUFFIX = "__VALUES";
+
+ private final String viewName;
+ private final Map<String, PinotColumnSpec> columnSpecMap;
+ private final String tenantColumnName;
+
+ public ViewDefinition(
+ String viewName, Map<String, PinotColumnSpec> columnSpecMap, String tenantColumnName) {
+ this.viewName = viewName;
+ this.columnSpecMap = columnSpecMap;
+ this.tenantColumnName = tenantColumnName;
+ }
+
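+ /*
+ * Parses a handler's viewDefinition config. Illustrative HOCON shape (the field names below are
+ * examples only, not an actual view configuration):
+ *   viewName = RawTraceView
+ *   mapFields = ["tags"]
+ *   fieldMap { "EVENT.id" = "span_id", "EVENT.spanTags" = "tags" }
+ */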
+ public static ViewDefinition parse(Map<String, Object> config, String tenantColumnName) {
+ String viewName = (String) config.get("viewName");
+ Map<String, PinotColumnSpec> columnSpecMap = new HashMap<>();
+ final Map<String, String> fieldMap = (Map<String, String>) config.get("fieldMap");
+ // todo: refactor to use attr service
+ final List<String> mapFieldsList = (List<String>) config.get("mapFields");
+ Set<String> mapFields = new HashSet<>();
+ if (mapFieldsList != null) {
+ mapFields.addAll(mapFieldsList);
+ }
+
+ for (String logicalName : fieldMap.keySet()) {
+ String physName = fieldMap.get(logicalName);
+ PinotColumnSpec spec = new PinotColumnSpec();
+ // todo: replace this with call to attribute service
+ if (mapFields.contains(fieldMap.get(logicalName))) {
+ spec.setType(ValueType.STRING_MAP);
+ // split them to 2 automatically here
+ spec.addColumnName(physName + MAP_KEYS_SUFFIX);
+ spec.addColumnName(physName + MAP_VALUES_SUFFIX);
+ } else {
+ spec.addColumnName(physName);
+ spec.setType(ValueType.STRING);
+ }
+ columnSpecMap.put(logicalName, spec);
+ }
+ return new ViewDefinition(viewName, columnSpecMap, tenantColumnName);
+ }
+
+ public String getViewName() {
+ return viewName;
+ }
+
+ public String getTenantIdColumn() {
+ return tenantColumnName;
+ }
+
+ public boolean containsColumn(String referencedColumn) {
+ return columnSpecMap.containsKey(referencedColumn);
+ }
+
+ public List<String> getPhysicalColumnNames(String logicalColumnName) {
+ return columnSpecMap.get(logicalColumnName).getColumnNames();
+ }
+
+ public boolean isMap(String logicalName) {
+ return (ValueType.STRING_MAP.equals(columnSpecMap.get(logicalName).getType()));
+ }
+
+ public String getKeyColumnNameForMap(String logicalName) {
+ List<String> keys = findPhysicalNameWithSuffix(logicalName, MAP_KEYS_SUFFIX);
+ Preconditions.checkArgument(keys.size() <= 1);
+ return keys.isEmpty() ? null : keys.get(0);
+ }
+
+ public String getValueColumnNameForMap(String logicalName) {
+ List<String> keys = findPhysicalNameWithSuffix(logicalName, MAP_VALUES_SUFFIX);
+ Preconditions.checkArgument(keys.size() <= 1);
+ return keys.isEmpty() ? null : keys.get(0);
+ }
+
+ private List<String> findPhysicalNameWithSuffix(String logicalName, String suffix) {
+ return columnSpecMap.get(logicalName).getColumnNames().stream()
+ .filter(e -> e.toUpperCase().endsWith(suffix))
+ .collect(Collectors.toList());
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java
new file mode 100644
index 00000000..2b59c585
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryRequestBuilderUtils.java
@@ -0,0 +1,32 @@
+package org.hypertrace.core.query.service;
+
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.OrderByExpression;
+import org.hypertrace.core.query.service.api.SortOrder;
+
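+/** Test helpers for building column, function and order-by {@code Expression} protos. */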
+public class QueryRequestBuilderUtils {
+ public static Expression.Builder createColumnExpression(String columnName) {
+ return Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName(columnName));
+ }
+
+ public static Expression.Builder createFunctionExpression(
+ String functionName, String columnNameArg, String alias) {
+ return Expression.newBuilder()
+ .setFunction(
+ Function.newBuilder()
+ .setAlias(alias)
+ .setFunctionName(functionName)
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName(columnNameArg))));
+ }
+
+ public static OrderByExpression.Builder createOrderByExpression(
+ Expression.Builder expression, SortOrder sortOrder) {
+ return OrderByExpression.newBuilder().setExpression(expression).setOrder(sortOrder);
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java
new file mode 100644
index 00000000..cb617df2
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplConfigTest.java
@@ -0,0 +1,166 @@
+package org.hypertrace.core.query.service;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.hypertrace.core.query.service.QueryServiceImplConfig.ClientConfig;
+import org.hypertrace.core.query.service.QueryServiceImplConfig.RequestHandlerConfig;
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.QueryRequest.Builder;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.pinot.PinotBasedRequestHandler;
+import org.hypertrace.core.query.service.pinot.ViewDefinition;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class QueryServiceImplConfigTest {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(QueryServiceImplConfigTest.class);
+ private Config appConfig;
+ private QueryServiceImplConfig queryServiceConfig;
+
+ @BeforeEach
+ public void setup() {
+ appConfig =
+ ConfigFactory.parseFile(
+ new File(
+ QueryServiceImplConfigTest.class
+ .getClassLoader()
+ .getResource("application.conf")
+ .getPath()));
+ queryServiceConfig = QueryServiceImplConfig.parse(appConfig.getConfig("service.config"));
+ }
+
+ @Test
+ public void testQueryServiceImplConfigParser() {
+ // Test QueryServiceImplConfig
+ assertEquals("query-service", appConfig.getString("service.name"));
+ assertEquals(8091, appConfig.getInt("service.admin.port"));
+ assertEquals(8090, appConfig.getInt("service.port"));
+ assertEquals(2, queryServiceConfig.getQueryRequestHandlersConfig().size());
+ assertEquals(2, queryServiceConfig.getClients().size());
+ assertEquals("tenant_id", queryServiceConfig.getTenantColumnName());
+
+ LOGGER.info("{}", queryServiceConfig.getQueryRequestHandlersConfig());
+
+ RequestHandlerConfig handler0 =
+ RequestHandlerConfig.parse(queryServiceConfig.getQueryRequestHandlersConfig().get(0));
+ assertEquals("piontCluster0", handler0.getName());
+ assertEquals("pinot", handler0.getType());
+ Map<String, Object> requestHandlerInfo = handler0.getRequestHandlerInfo();
+ LOGGER.info("{}", requestHandlerInfo);
+
+ String tenantColumnName = "tenant_id";
+ ViewDefinition viewDefinition =
+ ViewDefinition.parse(
+ (Map<String, Object>) requestHandlerInfo.get("viewDefinition"), tenantColumnName);
+ assertEquals("RawTraceView", viewDefinition.getViewName());
+ assertEquals(tenantColumnName, viewDefinition.getTenantIdColumn());
+
+ Map<String, ClientConfig> clientConfigMap =
+ queryServiceConfig.getClients().stream()
+ .map(ClientConfig::parse)
+ .collect(Collectors.toMap(ClientConfig::getType, clientConfig -> clientConfig));
+ ClientConfig clientConfig0 = clientConfigMap.get(handler0.getClientConfig());
+ assertEquals("broker", clientConfig0.getType());
+ assertEquals("pinotCluster0:8099", clientConfig0.getConnectionString());
+
+ RequestHandlerConfig handler1 =
+ RequestHandlerConfig.parse(queryServiceConfig.getQueryRequestHandlersConfig().get(1));
+ assertEquals("span-event-view-handler", handler1.getName());
+ assertEquals("pinot", handler1.getType());
+ ClientConfig clientConfig1 = clientConfigMap.get(handler1.getClientConfig());
+ assertEquals("zookeeper", clientConfig1.getType());
+ assertEquals("pinotCluster1:2181", clientConfig1.getConnectionString());
+ }
+
+ @Test
+ public void testHandlerSelection() {
+ // Register all the handlers with the registry.
+ for (Config config : queryServiceConfig.getQueryRequestHandlersConfig()) {
+ RequestHandlerConfig handlerConfig = RequestHandlerConfig.parse(config);
+ Map<String, Object> requestHandlerInfoConf = new HashMap<>();
+
+ String tenantColumnName = "tenant_id";
+ ViewDefinition viewDefinition =
+ ViewDefinition.parse(
+ (Map<String, Object>)
+ handlerConfig
+ .getRequestHandlerInfo()
+ .get(PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY),
+ tenantColumnName);
+ assertEquals(tenantColumnName, viewDefinition.getTenantIdColumn());
+ requestHandlerInfoConf.put(
+ PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY, viewDefinition);
+ RequestHandlerRegistry.get()
+ .register(
+ handlerConfig.getName(),
+ new RequestHandlerInfo(
+ handlerConfig.getName(), PinotBasedRequestHandler.class, requestHandlerInfoConf));
+ }
+
+ RequestHandlerSelector selector = new RequestHandlerSelector(RequestHandlerRegistry.get());
+
+ QueryRequest queryRequest = buildSimpleQuery();
+ RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
+ analyzer.analyze();
+ RequestHandler handler = selector.select(queryRequest, analyzer);
+ assertEquals("span-event-view-handler", handler.getName());
+ }
+
+ private QueryRequest buildSimpleQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier eventId = ColumnIdentifier.newBuilder().setColumnName("EVENT.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(eventId).build());
+
+ ColumnIdentifier eventType = ColumnIdentifier.newBuilder().setColumnName("EVENT.type").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(eventType).build());
+
+ ColumnIdentifier displayName =
+ ColumnIdentifier.newBuilder().setColumnName("EVENT.displaySpanName").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(displayName).build());
+
+ ColumnIdentifier tags = ColumnIdentifier.newBuilder().setColumnName("EVENT.spanTags").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(tags).build());
+
+ Filter startTimeFilter =
+ createTimeFilter(
+ "EVENT.startTime", Operator.GT, System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ createTimeFilter("EVENT.endTime", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+ return builder.build();
+ }
+
+ private Filter createTimeFilter(String columnName, Operator op, long value) {
+
+ ColumnIdentifier startTimeColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(startTimeColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString(String.valueOf(value)).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java
new file mode 100644
index 00000000..70ab59ea
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/QueryServiceImplTest.java
@@ -0,0 +1,332 @@
+package org.hypertrace.core.query.service;
+
+import com.google.common.collect.Lists;
+import io.grpc.Deadline;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.QueryRequest.Builder;
+import org.hypertrace.core.query.service.api.QueryServiceGrpc;
+import org.hypertrace.core.query.service.api.QueryServiceGrpc.QueryServiceBlockingStub;
+import org.hypertrace.core.query.service.api.ResultSetChunk;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.util.QueryRequestUtil;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class QueryServiceImplTest {
+ private static final Logger LOGGER = LoggerFactory.getLogger(QueryServiceImplTest.class);
+
+ @Test
+ public void testQueryServiceImplInitialization() {
+ QueryServiceImplConfig queryServiceConfig = new QueryServiceImplConfig();
+ queryServiceConfig.setTenantColumnName("tenant_id");
+ queryServiceConfig.setClients(List.of());
+ queryServiceConfig.setQueryRequestHandlersConfig(List.of());
+
+ QueryServiceImpl queryServiceImpl = new QueryServiceImpl(queryServiceConfig);
+ Assertions.assertNotNull(queryServiceImpl);
+ }
+
+ @Test
+ public void testBlankTenantColumnNameThrowsException() {
+ // Empty tenant id column name
+ QueryServiceImplConfig queryServiceConfig = new QueryServiceImplConfig();
+ queryServiceConfig.setTenantColumnName("");
+ queryServiceConfig.setClients(List.of());
+
+ Assertions.assertThrows(
+ RuntimeException.class,
+ () -> new QueryServiceImpl(queryServiceConfig),
+ "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config.");
+
+ // null tenant id column name
+ QueryServiceImplConfig queryServiceConfig1 = new QueryServiceImplConfig();
+ queryServiceConfig1.setTenantColumnName(null);
+ queryServiceConfig1.setClients(List.of());
+
+ Assertions.assertThrows(
+ RuntimeException.class,
+ () -> new QueryServiceImpl(queryServiceConfig1),
+ "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config.");
+
+ // whitespace tenant id column name
+ QueryServiceImplConfig queryServiceConfig2 = new QueryServiceImplConfig();
+ queryServiceConfig2.setTenantColumnName(" ");
+ queryServiceConfig2.setClients(List.of());
+
+ Assertions.assertThrows(
+ RuntimeException.class,
+ () -> new QueryServiceImpl(queryServiceConfig2),
+ "Tenant column name is not defined. Need to set service.config.tenantColumnName in the application config.");
+ }
+
+ // Works only against a query service running at localhost:8090, hence disabled.
+ @Disabled
+ public void testGrpc() {
+ ManagedChannel managedChannel =
+ ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext().build();
+ QueryServiceBlockingStub queryServiceBlockingStub =
+ QueryServiceGrpc.newBlockingStub(managedChannel);
+
+ ArrayList<QueryRequest> queryRequests =
+ Lists.newArrayList(
+ buildSimpleQuery(),
+ buildAggQuery(),
+ buildGroupByAggQuery(),
+ buildGroupByTimeRollupAggQuery());
+
+ for (QueryRequest queryRequest : queryRequests) {
+ LOGGER.info("Trying to send request {}", queryRequest);
+ Iterator<ResultSetChunk> resultSetChunkIterator =
+ queryServiceBlockingStub.withDeadline(Deadline.after(15, TimeUnit.SECONDS))
+ .execute(queryRequest);
+ LOGGER.info("Got response back: {}", resultSetChunkIterator);
+ while (resultSetChunkIterator.hasNext()) {
+ LOGGER.info("{}", resultSetChunkIterator.next());
+ }
+ }
+ }
+
+ @Disabled
+ public void testGrpcMap() {
+ ManagedChannel managedChannel =
+ ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext().build();
+ QueryServiceBlockingStub queryServiceBlockingStub =
+ QueryServiceGrpc.newBlockingStub(managedChannel);
+
+ ArrayList<QueryRequest> queryRequests = Lists.newArrayList(buildSimpleMapQuery());
+
+ for (QueryRequest queryRequest : queryRequests) {
+ LOGGER.info("Trying to send request {}", queryRequest);
+ Iterator<ResultSetChunk> resultSetChunkIterator =
+ queryServiceBlockingStub.withDeadline(Deadline.after(25, TimeUnit.SECONDS))
+ .execute(queryRequest);
+ LOGGER.info("Got response back: {}", resultSetChunkIterator);
+ while (resultSetChunkIterator.hasNext()) {
+ LOGGER.info(" Result {}", resultSetChunkIterator.next());
+ }
+ }
+ }
+
+ private QueryRequest buildSimpleQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("EVENT.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+
+ Filter startTimeFilter =
+ createTimeFilter(
+ "EVENT.start_time_millis",
+ Operator.GT,
+ System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ return builder.build();
+ }
+
+ private QueryRequest buildGroupByAggQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("EVENT.id"));
+
+ Filter startTimeFilter =
+ createTimeFilter(
+ "EVENT.start_time_millis",
+ Operator.GT,
+ System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("EVENT.displaySpanName").build()));
+ return builder.build();
+ }
+
+ private QueryRequest buildGroupByTimeRollupAggQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("EVENT.id"));
+
+ Filter startTimeFilter =
+ createTimeFilter(
+ "EVENT.start_time_millis",
+ Operator.GT,
+ System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
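+ // Pinot's dateTimeConvert UDF takes four arguments: the time column, its input format,
+ // the output format, and the output granularity; here start_time_millis is bucketed into
+ // 30-second windows for the time rollup.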
+ Function groupByTimeUdf =
+ Function.newBuilder()
+ .setFunctionName("dateTimeConvert")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder()
+ .setColumnName("EVENT.start_time_millis")
+ .build()))
+ .addArguments(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setString("1:MILLISECONDS:EPOCH").build())))
+ .addArguments(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setString("1:MILLISECONDS:EPOCH").build())))
+ .addArguments(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString("30:SECONDS").build())))
+ .build();
+ builder.addGroupBy(Expression.newBuilder().setFunction(groupByTimeUdf).build());
+ return builder.build();
+ }
+
+ private QueryRequest buildSimpleMapQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId =
+ ColumnIdentifier.newBuilder().setColumnName("EVENT.id").setAlias("SpanIds").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+ builder.addSelection(createSelection("EVENT.end_time_millis"));
+ builder.addSelection(createSelection("EVENT.displaySpanName"));
+ builder.addSelection(createSelection("EVENT.attributes.request_body"));
+ builder.addSelection(createSelection("EVENT.attributes.protocol_name"));
+ builder.addSelection(createSelection("EVENT.attributes.request_headers"));
+ builder.addSelection(createSelection("EVENT.attributes.response_headers"));
+
+ builder.addSelection(createSelection("EVENT.start_time_millis"));
+ builder.addSelection(createSelection("EVENT.metrics.duration_millis"));
+ builder.addSelection(createSelection("Service.name"));
+ builder.addSelection(createSelection("EVENT.attributes.response_body"));
+ builder.addSelection(createSelection("EVENT.attributes.parent_span_id"));
+
+ Filter startTimeFilter =
+ createTimeFilter(
+ "EVENT.start_time_millis",
+ Operator.GT,
+ System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+
+ builder.setFilter(andFilter);
+
+ return builder.build();
+ }
+
+ private QueryRequest buildAggQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ Function maxStartTime =
+ Function.newBuilder()
+ .setFunctionName("MAX")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder()
+ .setColumnName("EVENT.start_time_millis")
+ .build()))
+ .setAlias("MAX_start_time_millis")
+ .build();
+
+ builder.addSelection(createSelection("EVENT.attributes.request_headers"));
+ builder.addSelection(Expression.newBuilder().setFunction(maxStartTime).build());
+
+ Filter startTimeFilter =
+ createTimeFilter(
+ "EVENT.start_time_millis",
+ Operator.GT,
+ System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ createTimeFilter("EVENT.end_time_millis", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ return builder.build();
+ }
+
+ private Expression createSelection(String colName) {
+ return Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName(colName))
+ .build();
+ }
+
+ private Filter createTimeFilter(String columnName, Operator op, long value) {
+
+ ColumnIdentifier startTimeColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(startTimeColumn).build();
+
+ // TODO: Why is this not LONG
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString(String.valueOf(value)).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createStringColumnFilter(String columnName, Operator op, String value) {
+ ColumnIdentifier column = ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(column).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString(String.valueOf(value)).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java
new file mode 100644
index 00000000..cfac7981
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotBasedRequestHandlerTest.java
@@ -0,0 +1,333 @@
+package org.hypertrace.core.query.service.pinot;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.IOException;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.client.ResultSetGroup;
+import org.hypertrace.core.query.service.QueryContext;
+import org.hypertrace.core.query.service.QueryRequestBuilderUtils;
+import org.hypertrace.core.query.service.QueryResultCollector;
+import org.hypertrace.core.query.service.RequestAnalyzer;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.ResultSetChunk;
+import org.hypertrace.core.query.service.api.Row;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class PinotBasedRequestHandlerTest {
+ // Test subject
+ private PinotBasedRequestHandler pinotBasedRequestHandler;
+ private final ObjectMapper objectMapper = new ObjectMapper();
+
+ @BeforeEach
+ public void setUp() {
+ // Mocks
+ PinotClientFactory pinotClientFactoryMock = mock(PinotClientFactory.class);
+ ResultSetTypePredicateProvider resultSetTypePredicateProviderMock = mock(
+ ResultSetTypePredicateProvider.class);
+ pinotBasedRequestHandler =
+ new PinotBasedRequestHandler(resultSetTypePredicateProviderMock, pinotClientFactoryMock);
+
+ // Make the predicate provider report the ResultTable result set format so the tests exercise that parsing path
+ when(resultSetTypePredicateProviderMock.isSelectionResultSetType(any(ResultSet.class)))
+ .thenReturn(false);
+ when(resultSetTypePredicateProviderMock.isResultTableResultSetType(any(ResultSet.class)))
+ .thenReturn(true);
+ }
+
+ @Test
+ public void testConvertSimpleSelectionsQueryResultSet() throws IOException {
+ String[][] resultTable =
+ new String[][] {
+ {"operation-name-0", "service-name-0", "70", "80"},
+ {"operation-name-1", "service-name-1", "71", "79"},
+ {"operation-name-2", "service-name-2", "72", "78"},
+ {"operation-name-3", "service-name-3", "73", "77"}
+ };
+ List<String> columnNames = List.of("operation_name", "service_name", "start_time_millis", "duration");
+ ResultSet resultSet = mockResultSet(4, 4, columnNames, resultTable);
+ ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet));
+ TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector();
+
+ pinotBasedRequestHandler.convert(
+ resultSetGroup, testQueryResultCollector, new LinkedHashSet<>());
+
+ verifyResponseRows(testQueryResultCollector, resultTable);
+ }
+
+ @Test
+ public void testConvertAggregationColumnsQueryResultSet() throws IOException {
+ String[][] resultTable =
+ new String[][] {
+ {"operation-name-10", "110", "40", "21"},
+ {"operation-name-11", "111", "41", "22"},
+ {"operation-name-12", "112", "42", "23"},
+ {"operation-name-13", "113", "43", "24"}
+ };
+ List<String> columnNames = List.of("operation_name", "avg(duration)", "count(*)", "max(duration)");
+ ResultSet resultSet = mockResultSet(4, 4, columnNames, resultTable);
+ ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet));
+ TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector();
+
+ pinotBasedRequestHandler.convert(
+ resultSetGroup, testQueryResultCollector, new LinkedHashSet<>());
+
+ verifyResponseRows(testQueryResultCollector, resultTable);
+ }
+
+ @Test
+ public void testConvertSelectionsWithMapKeysAndValuesQueryResultSet() throws IOException {
+ String[][] resultTable =
+ new String[][] {
+ {
+ "operation-name-11",
+ stringify(List.of("t1", "t2")),
+ stringify(List.of("v1", "v2")),
+ "service-1",
+ stringify(List.of("t10")),
+ stringify(List.of("v10"))
+ },
+ {
+ "operation-name-12",
+ stringify(List.of("a2")),
+ stringify(List.of("b2")),
+ "service-2",
+ stringify(List.of("c10", "c11")),
+ stringify(List.of("d10", "d11"))
+ },
+ {
+ "operation-name-13",
+ stringify(List.of()),
+ stringify(List.of()),
+ "service-3",
+ stringify(List.of("e15")),
+ stringify(List.of("f15"))
+ }
+ };
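+ // Map attributes come back from Pinot as parallel __KEYS/__VALUES string-array columns;
+ // the handler is expected to zip each pair back into a single JSON map column (see expectedRows below).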
+ List<String> columnNames =
+ List.of(
+ "operation_name",
+ "tags1" + ViewDefinition.MAP_KEYS_SUFFIX,
+ "tags1" + ViewDefinition.MAP_VALUES_SUFFIX,
+ "service_name",
+ "tags2" + ViewDefinition.MAP_KEYS_SUFFIX,
+ "tags2" + ViewDefinition.MAP_VALUES_SUFFIX);
+ ResultSet resultSet = mockResultSet(3, 6, columnNames, resultTable);
+ ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet));
+ TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector();
+
+ pinotBasedRequestHandler.convert(
+ resultSetGroup, testQueryResultCollector, new LinkedHashSet<>());
+
+ String[][] expectedRows =
+ new String[][] {
+ {
+ "operation-name-11",
+ stringify(Map.of("t1", "v1", "t2", "v2")),
+ "service-1",
+ stringify(Map.of("t10", "v10"))
+ },
+ {
+ "operation-name-12",
+ stringify(Map.of("a2", "b2")),
+ "service-2",
+ stringify(Map.of("c10", "d10", "c11", "d11"))
+ },
+ {"operation-name-13", stringify(Map.of()), "service-3", stringify(Map.of("e15", "f15"))}
+ };
+
+ verifyResponseRows(testQueryResultCollector, expectedRows);
+ }
+
+ @Test
+ public void testConvertMultipleResultSetsInResultSetGroup() throws IOException {
+ List<String> columnNames = List.of("operation_name", "avg(duration)", "count(*)", "max(duration)");
+ String[][] resultTable1 =
+ new String[][] {
+ {"operation-name-10", "110", "40", "21"},
+ {"operation-name-11", "111", "41", "22"},
+ {"operation-name-12", "112", "42", "23"},
+ {"operation-name-13", "113", "43", "24"}
+ };
+ ResultSet resultSet1 = mockResultSet(4, 4, columnNames, resultTable1);
+
+ String[][] resultTable2 =
+ new String[][] {
+ {"operation-name-20", "200", "400", "20000"},
+ {"operation-name-22", "220", "420", "22000"}
+ };
+ ResultSet resultSet2 = mockResultSet(2, 4, columnNames, resultTable2);
+ ResultSetGroup resultSetGroup = mockResultSetGroup(List.of(resultSet1, resultSet2));
+ TestQueryResultCollector testQueryResultCollector = new TestQueryResultCollector();
+
+ pinotBasedRequestHandler.convert(
+ resultSetGroup, testQueryResultCollector, new LinkedHashSet<>());
+
+ String[][] expectedRows =
+ new String[][] {
+ {"operation-name-10", "110", "40", "21"},
+ {"operation-name-11", "111", "41", "22"},
+ {"operation-name-12", "112", "42", "23"},
+ {"operation-name-13", "113", "43", "24"},
+ {"operation-name-20", "200", "400", "20000"},
+ {"operation-name-22", "220", "420", "22000"}
+ };
+
+ verifyResponseRows(testQueryResultCollector, expectedRows);
+ }
+
+ @Test
+ public void testNullQueryRequestContextThrowsNPE() {
+ Assertions.assertThrows(
+ NullPointerException.class,
+ () -> pinotBasedRequestHandler.handleRequest(
+ null,
+ QueryRequest.newBuilder().build(),
+ mock(QueryResultCollector.class),
+ mock(RequestAnalyzer.class)));
+ }
+
+ @Test
+ public void testNullTenantIdQueryRequestContextThrowsNPE() {
+ Assertions.assertThrows(
+ NullPointerException.class,
+ () -> pinotBasedRequestHandler.handleRequest(
+ new QueryContext(null),
+ QueryRequest.newBuilder().build(),
+ mock(QueryResultCollector.class),
+ mock(RequestAnalyzer.class)));
+ }
+
+ @Test
+ public void
+ testGroupBysAndAggregationsMixedWithSelectionsThrowsExceptionWhenDistinctSelectionIsSpecified() {
+ // Setting distinct selections and mixing selections and group bys should throw exception
+ Assertions.assertThrows(
+ IllegalArgumentException.class,
+ () -> pinotBasedRequestHandler.handleRequest(
+ new QueryContext("test-tenant-id"),
+ QueryRequest.newBuilder()
+ .setDistinctSelections(true)
+ .addSelection(QueryRequestBuilderUtils.createColumnExpression("col1"))
+ .addSelection(QueryRequestBuilderUtils.createColumnExpression("col2"))
+ .addGroupBy(QueryRequestBuilderUtils.createColumnExpression("col3"))
+ .build(),
+ mock(QueryResultCollector.class),
+ mock(RequestAnalyzer.class)));
+
+ // Setting distinct selections and mixing selections and aggregations should throw exception
+ Assertions.assertThrows(
+ IllegalArgumentException.class,
+ () -> pinotBasedRequestHandler.handleRequest(
+ new QueryContext("test-tenant-id"),
+ QueryRequest.newBuilder()
+ .setDistinctSelections(true)
+ .addSelection(QueryRequestBuilderUtils.createColumnExpression("col1"))
+ .addSelection(QueryRequestBuilderUtils.createColumnExpression("col2"))
+ .addAggregation(
+ QueryRequestBuilderUtils.createFunctionExpression(
+ "AVG", "duration", "avg_duration"))
+ .build(),
+ mock(QueryResultCollector.class),
+ mock(RequestAnalyzer.class)));
+
+ // Setting distinct selections and mixing selections, group bys and aggregations should throw
+ // exception
+ Assertions.assertThrows(
+ IllegalArgumentException.class,
+ () -> pinotBasedRequestHandler.handleRequest(
+ new QueryContext("test-tenant-id"),
+ QueryRequest.newBuilder()
+ .setDistinctSelections(true)
+ .addSelection(QueryRequestBuilderUtils.createColumnExpression("col1"))
+ .addSelection(QueryRequestBuilderUtils.createColumnExpression("col2"))
+ .addGroupBy(QueryRequestBuilderUtils.createColumnExpression("col3"))
+ .addAggregation(
+ QueryRequestBuilderUtils.createFunctionExpression(
+ "AVG", "duration", "avg_duration"))
+ .build(),
+ mock(QueryResultCollector.class),
+ mock(RequestAnalyzer.class)));
+ }
+
+ private ResultSet mockResultSet(
+ int rowCount, int columnCount, List<String> columnNames, String[][] resultsTable) {
+ ResultSet resultSet = mock(ResultSet.class);
+ when(resultSet.getRowCount()).thenReturn(rowCount);
+ when(resultSet.getColumnCount()).thenReturn(columnCount);
+ for (int colIdx = 0; colIdx < columnNames.size(); colIdx++) {
+ when(resultSet.getColumnName(colIdx)).thenReturn(columnNames.get(colIdx));
+ }
+
+ for (int rowIdx = 0; rowIdx < resultsTable.length; rowIdx++) {
+ for (int colIdx = 0; colIdx < resultsTable[0].length; colIdx++) {
+ when(resultSet.getString(rowIdx, colIdx)).thenReturn(resultsTable[rowIdx][colIdx]);
+ }
+ }
+
+ return resultSet;
+ }
+
+ private ResultSetGroup mockResultSetGroup(List<ResultSet> resultSets) {
+ ResultSetGroup resultSetGroup = mock(ResultSetGroup.class);
+
+ when(resultSetGroup.getResultSetCount()).thenReturn(resultSets.size());
+ for (int i = 0; i < resultSets.size(); i++) {
+ when(resultSetGroup.getResultSet(i)).thenReturn(resultSets.get(i));
+ }
+
+ return resultSetGroup;
+ }
+
+ private void verifyResponseRows(
+ TestQueryResultCollector testQueryResultCollector, String[][] expectedResultTable)
+ throws IOException {
+ List<Row> rows = testQueryResultCollector.getResultSetChunk().getRowList();
+ Assertions.assertEquals(expectedResultTable.length, rows.size());
+ for (int rowIdx = 0; rowIdx < rows.size(); rowIdx++) {
+ Row row = rows.get(rowIdx);
+ Assertions.assertEquals(expectedResultTable[rowIdx].length, row.getColumnCount());
+ for (int colIdx = 0; colIdx < row.getColumnCount(); colIdx++) {
+ String val = row.getColumn(colIdx).getString();
+ // In the scope of our unit tests this value is a serialized map. The raw JSON strings can't
+ // be compared directly because map ordering isn't guaranteed, so compare the parsed JSON trees.
+ if (val.startsWith("{") && val.endsWith("}")) {
+ Assertions.assertEquals(
+ objectMapper.readTree(expectedResultTable[rowIdx][colIdx]),
+ objectMapper.readTree(val));
+ } else {
+ Assertions.assertEquals(expectedResultTable[rowIdx][colIdx], val);
+ }
+ }
+ }
+ }
+
+ private String stringify(Object obj) throws JsonProcessingException {
+ return objectMapper.writeValueAsString(obj);
+ }
+
+ static class TestQueryResultCollector implements QueryResultCollector<Row> {
+ private final ResultSetChunk.Builder resultSetChunkBuilder = ResultSetChunk.newBuilder();
+
+ @Override
+ public void collect(Row row) {
+ resultSetChunkBuilder.addRow(row);
+ }
+
+ @Override
+ public void finish() {}
+
+ public ResultSetChunk getResultSetChunk() {
+ return resultSetChunkBuilder.build();
+ }
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java
new file mode 100644
index 00000000..995cb020
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotMapConverterTest.java
@@ -0,0 +1,94 @@
+package org.hypertrace.core.query.service.pinot;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class PinotMapConverterTest {
+ private static final String KEY1 = "KEY1";
+ private static final String KEY2 = "KEY2";
+ private static final String VAL1 = "VAL1";
+ private static final String VAL2 = "VAL2";
+
+ private List<String> validKeys;
+ private List<String> validValues;
+ private String validKeysJsonString;
+ private String validValueJsonString;
+ private String emptyMapJsonString;
+ private String expectedValidMapString;
+ private ObjectMapper objectMapper;
+ private Map<String, String> expectedMap;
+ private PinotMapConverter target;
+
+ @BeforeEach
+ public void setup() throws JsonProcessingException {
+ objectMapper = new ObjectMapper();
+ emptyMapJsonString = objectMapper.writeValueAsString(Collections.emptyMap());
+
+ validKeys = Lists.newArrayList(KEY1, KEY2);
+ validValues = Lists.newArrayList(VAL1, VAL2);
+ validKeysJsonString = objectMapper.writeValueAsString(validKeys);
+ validValueJsonString = objectMapper.writeValueAsString(validValues);
+
+ expectedMap = new HashMap<>();
+ expectedMap.put(KEY1, VAL1);
+ expectedMap.put(KEY2, VAL2);
+ expectedValidMapString = objectMapper.writeValueAsString(expectedMap);
+
+ target = new PinotMapConverter();
+ }
+
+ @Test
+ public void test_merge_nullKey_shouldThrowException() throws IOException {
+ assertThrows(
+ IOException.class,
+ () -> {
+ target.merge(null, "");
+ });
+ }
+
+ @Test
+ public void test_merge_emptyListStringKey_shouldReturnEmptyMap() throws IOException {
+ assertEquals(emptyMapJsonString, target.merge("[]", "[]"));
+ }
+
+ @Test
+ public void test_merge_pinotEmptyList_shouldReturnEmptyMap() throws IOException {
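+ // Pinot represents an empty array column as a list holding a single empty string;
+ // merging such keys and values should still yield an empty map.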
+ assertEquals(emptyMapJsonString, target.merge("[\"\"]", "[\"\"]"));
+ }
+
+ @Test
+ public void test_merge_validKeysAndValues_shouldReturnExpectedMap()
+ throws IOException {
+ String expected = objectMapper.writeValueAsString(expectedMap);
+ assertEquals(expected, target.merge(validKeysJsonString, validValueJsonString));
+ }
+
+ @Test
+ public void test_merge_largerKeysThanValues_shouldReturnBasedOnKeys() throws IOException {
+ String newKey = "KEY3";
+ expectedMap.put(newKey, null);
+ validKeys.add(newKey);
+ String largerKeysString = objectMapper.writeValueAsString(validKeys);
+ String expectedMapString = objectMapper.writeValueAsString(expectedMap);
+ assertEquals(expectedMapString, target.merge(largerKeysString, validValueJsonString));
+ }
+
+ @Test
+ public void test_merge_largerValuesThanKeys_shouldReturnBasedOnKeys() throws IOException {
+ String newValue = "VALUE3";
+ validValues.add(newValue);
+ String largerValuesString = objectMapper.writeValueAsString(validValues);
+ assertEquals(expectedValidMapString, target.merge(validKeysJsonString, largerValuesString));
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java
new file mode 100644
index 00000000..e537eb3a
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotQueryTest.java
@@ -0,0 +1,25 @@
+package org.hypertrace.core.query.service.pinot;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class PinotQueryTest {
+
+ @Test
+ public void testPinotQuery() {
+ final AdhocPinotQuery q1 = new AdhocPinotQuery("query1", null);
+ q1.setQuery("q1");
+ final AdhocPinotQuery q2 = new AdhocPinotQuery("query2", null);
+ q2.setQuery("q2");
+ final AdhocPinotQuery q3 = new AdhocPinotQuery("query2", null);
+ q3.setQuery("q1");
+ Assertions.assertFalse(q1.equals(q2));
+ Assertions.assertFalse(q1.equals(q3));
+ Assertions.assertFalse(q2.equals(q3));
+ Assertions.assertNotEquals(q1, q2);
+ Assertions.assertNotEquals(q2, q3);
+ q3.setQuery("q2");
+ Assertions.assertEquals(q2, q3);
+ Assertions.assertTrue(q2.equals(q3));
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java
new file mode 100644
index 00000000..4107c5f8
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotResultAnalyzerTest.java
@@ -0,0 +1,156 @@
+package org.hypertrace.core.query.service.pinot;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import org.apache.pinot.client.ResultSet;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mock;
+
+public class PinotResultAnalyzerTest {
+ // Attribute 1
+ private static final String LOGICAL_COL1 = "COL1";
+ private static final String PHYS_COL1 = "PHYS_COL1";
+ private static final String VAL_COL1 = "COL1_VAL";
+
+ // Attribute 2
+ private static final String LOGICAL_COL2 = "COL2";
+ private static final String PHYS_COL2 = "PHYS_COL2";
+ private static final String VAL_COL2 = "COL2_VAL";
+
+ // Map Attribute 1
+ private static final String LOGICAL_MAP_NAME1 = "MAP_COL1";
+ private static final String MAP1_KEY_NAME = "MAP_COL1__KEYS";
+ private static final String MAP1_VAL_NAME = "MAP_COL1__VALUES";
+ private static final String MAP1_KEY_VAL = "[\"Content-Type\"]";
+ private static final String MAP1_VAL_VAL = "[\"application/json\"]";
+
+ // Map Attribute 2
+ private static final String LOGICAL_MAP_NAME2 = "MAP_COL2";
+ private static final String MAP2_KEY_NAME = "MAP_COL2__KEYS";
+ private static final String MAP2_VAL_NAME = "MAP_COL2__VALUES";
+ private static final String MAP2_KEY_VAL = "[\"Amazing\"]";
+ private static final String MAP2_VAL_VAL = "[\"@TestOrg\"]";
+
+ @Mock private ResultSet resultSet;
+ @Mock private ViewDefinition viewDefinition;
+
+ private PinotResultAnalyzer target;
+ private LinkedHashSet<String> selectedAttributes;
+ private Map<String, List<String>> viewDefinitionMap;
+ private List<String> resultSetColumnNames;
+ private List<String> resultSetColumnValues;
+ private PinotMapConverter pinotMapConverter;
+
+ @BeforeEach
+ public void setup() {
+ viewDefinition = mock(ViewDefinition.class);
+ resultSet = mock(ResultSet.class);
+
+ pinotMapConverter = new PinotMapConverter();
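+ // Each map attribute maps to two physical Pinot columns (__KEYS and __VALUES);
+ // scalar attributes map to a single physical column.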
+ viewDefinitionMap =
+ ImmutableMap.<String, List<String>>builder()
+ .put(LOGICAL_COL1, Lists.newArrayList(PHYS_COL1))
+ .put(LOGICAL_COL2, Lists.newArrayList(PHYS_COL2))
+ .put(LOGICAL_MAP_NAME1, Lists.newArrayList(MAP1_KEY_NAME, MAP1_VAL_NAME))
+ .put(LOGICAL_MAP_NAME2, Lists.newArrayList(MAP2_KEY_NAME, MAP2_VAL_NAME))
+ .build();
+ viewDefinitionMap.forEach(
+ (k, v) -> {
+ when(viewDefinition.getPhysicalColumnNames(k)).thenReturn(v);
+ if (v.size() > 1) {
+ when(viewDefinition.isMap(k)).thenReturn(true);
+ } else {
+ when(viewDefinition.isMap(k)).thenReturn(false);
+ }
+ });
+ when(viewDefinition.getKeyColumnNameForMap(LOGICAL_MAP_NAME1)).thenReturn(MAP1_KEY_NAME);
+ when(viewDefinition.getValueColumnNameForMap(LOGICAL_MAP_NAME1)).thenReturn(MAP1_VAL_NAME);
+ when(viewDefinition.getKeyColumnNameForMap(LOGICAL_MAP_NAME2)).thenReturn(MAP2_KEY_NAME);
+ when(viewDefinition.getValueColumnNameForMap(LOGICAL_MAP_NAME2)).thenReturn(MAP2_VAL_NAME);
+
+ selectedAttributes =
+ new LinkedHashSet<>(
+ ImmutableList.<String>builder()
+ .add(LOGICAL_COL1)
+ .add(LOGICAL_MAP_NAME1)
+ .add(LOGICAL_COL2)
+ .add(LOGICAL_MAP_NAME2)
+ .build());
+ resultSetColumnNames =
+ Lists.newArrayList(
+ PHYS_COL1, MAP1_KEY_NAME, MAP1_VAL_NAME, PHYS_COL2, MAP2_KEY_NAME, MAP2_VAL_NAME);
+
+ resultSetColumnValues =
+ Lists.newArrayList(
+ VAL_COL1, MAP1_KEY_VAL, MAP1_VAL_VAL, VAL_COL2, MAP2_KEY_VAL, MAP2_VAL_VAL);
+
+ mockResultSet(resultSetColumnNames, resultSetColumnValues);
+ target = PinotResultAnalyzer.create(resultSet, selectedAttributes, viewDefinition);
+ }
+
+ @Test
+ public void test_create_validInputWithMap_shouldFindIndexCorrectly() {
+ // assert index for non-map attributes
+ assertEquals(
+ findIndexInResultSet(resultSetColumnNames, PHYS_COL1),
+ target.getPhysicalColumnIndex(LOGICAL_COL1));
+ assertEquals(
+ findIndexInResultSet(resultSetColumnNames, PHYS_COL2),
+ target.getPhysicalColumnIndex(LOGICAL_COL2));
+
+ // assert index for map attributes
+ assertEquals(
+ findIndexInResultSet(resultSetColumnNames, MAP1_KEY_NAME),
+ target.getMapKeyIndex(LOGICAL_MAP_NAME1));
+ assertEquals(
+ findIndexInResultSet(resultSetColumnNames, MAP2_KEY_NAME),
+ target.getMapKeyIndex(LOGICAL_MAP_NAME2));
+
+ assertEquals(
+ findIndexInResultSet(resultSetColumnNames, MAP1_VAL_NAME),
+ target.getMapValueIndex(LOGICAL_MAP_NAME1));
+ assertEquals(
+ findIndexInResultSet(resultSetColumnNames, MAP2_VAL_NAME),
+ target.getMapValueIndex(LOGICAL_MAP_NAME2));
+ }
+
+ @Test
+ public void test_getDataFromRow_validInputWithTwoMaps_ShouldGetData() throws IOException {
+ assertEquals(VAL_COL1, target.getDataFromRow(0, LOGICAL_COL1));
+ assertEquals(VAL_COL2, target.getDataFromRow(0, LOGICAL_COL2));
+ assertEquals(
+ pinotMapConverter.merge(MAP1_KEY_VAL, MAP1_VAL_VAL),
+ target.getDataFromRow(0, LOGICAL_MAP_NAME1));
+ assertEquals(
+ pinotMapConverter.merge(MAP2_KEY_VAL, MAP2_VAL_VAL),
+ target.getDataFromRow(0, LOGICAL_MAP_NAME2));
+ }
+
+ private Integer findIndexInResultSet(List<String> resultSetColumns, String name) {
+ for (int idx = 0; idx < resultSetColumns.size(); idx++) {
+ if (name.equalsIgnoreCase(resultSetColumns.get(idx))) {
+ return idx;
+ }
+ }
+ return null;
+ }
+
+ private void mockResultSet(
+ List<String> resultSetColumnNames, List<String> resultSetColumnValues) {
+ when(resultSet.getColumnCount()).thenReturn(resultSetColumnNames.size());
+ for (int idx = 0; idx < resultSetColumnNames.size(); idx++) {
+ when(resultSet.getColumnName(idx)).thenReturn(resultSetColumnNames.get(idx));
+ when(resultSet.getString(0, idx)).thenReturn(resultSetColumnValues.get(idx));
+ }
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java
new file mode 100644
index 00000000..773440cf
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/PinotUtilsTest.java
@@ -0,0 +1,19 @@
+package org.hypertrace.core.query.service.pinot;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class PinotUtilsTest {
+
+ @Test
+ public void testZkPath() {
+ Assertions.assertEquals(
+ "localhost:2181/pinot", PinotUtils.getZkPath("localhost:2181", "pinot"));
+ Assertions.assertEquals(
+ "localhost:2181/pinot", PinotUtils.getZkPath("localhost:2181/", "pinot"));
+ Assertions.assertEquals(
+ "localhost:2181/pinot/myView", PinotUtils.getZkPath("localhost:2181/pinot", "myView"));
+ Assertions.assertEquals(
+ "localhost:2181/pinot/myView", PinotUtils.getZkPath("localhost:2181/pinot/", "myView"));
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java
new file mode 100644
index 00000000..b1370273
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/QueryRequestToPinotSQLConverterTest.java
@@ -0,0 +1,830 @@
+package org.hypertrace.core.query.service.pinot;
+
+import static org.hypertrace.core.query.service.QueryRequestBuilderUtils.createColumnExpression;
+import static org.hypertrace.core.query.service.QueryRequestBuilderUtils.createFunctionExpression;
+import static org.hypertrace.core.query.service.QueryRequestBuilderUtils.createOrderByExpression;
+import static org.mockito.ArgumentMatchers.any;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.pinot.client.Connection;
+import org.apache.pinot.client.Request;
+import org.hypertrace.core.query.service.QueryContext;
+import org.hypertrace.core.query.service.RequestHandlerInfo;
+import org.hypertrace.core.query.service.RequestHandlerRegistry;
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.OrderByExpression;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.QueryRequest.Builder;
+import org.hypertrace.core.query.service.api.SortOrder;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.api.ValueType;
+import org.hypertrace.core.query.service.pinot.PinotClientFactory.PinotClient;
+import org.hypertrace.core.query.service.util.QueryRequestUtil;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+public class QueryRequestToPinotSQLConverterTest {
+ private static final String TENANT_ID = "__default";
+ private static final String TENANT_COLUMN_NAME = "tenant_id";
+
+ private static ViewDefinition viewDefinition;
+ private static QueryContext queryContext;
+ private QueryRequestToPinotSQLConverter converter;
+ private Connection connection;
+
+ @BeforeAll
+ public static void setUp() {
+ String handlerName = "pinotBasedRequestHandler";
+
+ Map<String, Object> config = new HashMap<>();
+ Map<String, PinotColumnSpec> columnSpecMap = new HashMap<>();
+ Set<String> mapFields = Set.of("tags", "request_headers");
+
+ Map<String, List<String>> logicalToViewColumns =
+ ImmutableMap.<String, List<String>>builder()
+ .put("Span.tags", Lists.newArrayList("tags"))
+ .put("Span.id", Lists.newArrayList("span_id"))
+ .put("Span.duration_millis", Lists.newArrayList("duration_millis"))
+ .put("Span.start_time_millis", Lists.newArrayList("start_time_millis"))
+ .put("Span.end_time_millis", Lists.newArrayList("end_time_millis"))
+ .put("Span.displaySpanName", Lists.newArrayList("span_name"))
+ .put("Span.is_entry", Lists.newArrayList("is_entry"))
+ .put("Span.attributes.request_headers", Lists.newArrayList("request_headers"))
+ .put("Span.attributes.request_body", Lists.newArrayList("request_body"))
+ .put("Span.attributes.protocol_name", Lists.newArrayList("protocol_name"))
+ .put("Span.attributes.response_headers", Lists.newArrayList("response_headers"))
+ .put("Span.attributes.response_body", Lists.newArrayList("response_body"))
+ .put("Span.metrics.duration_millis", Lists.newArrayList("duration_millis"))
+ .put("Span.serviceName", Lists.newArrayList("service_name"))
+ .put("Span.attributes.parent_span_id", Lists.newArrayList("parent_span_id"))
+ .build();
+
+ for (String logicalName : logicalToViewColumns.keySet()) {
+ PinotColumnSpec spec = new PinotColumnSpec();
+ for (String viewName : logicalToViewColumns.get(logicalName)) {
+ if (mapFields.contains(viewName)) {
+ spec.setType(ValueType.STRING_MAP);
+ spec.addColumnName(viewName + "__KEYS");
+ spec.addColumnName(viewName + "__VALUES");
+ } else {
+ spec.addColumnName(viewName);
+ spec.setType(ValueType.STRING);
+ }
+ }
+ columnSpecMap.put(logicalName, spec);
+ }
+ viewDefinition = new ViewDefinition("SpanEventView", columnSpecMap, TENANT_COLUMN_NAME);
+ config.put(PinotBasedRequestHandler.VIEW_DEFINITION_CONFIG_KEY, viewDefinition);
+ RequestHandlerInfo requestHandlerInfo =
+ new RequestHandlerInfo(handlerName, PinotBasedRequestHandler.class, config);
+ RequestHandlerRegistry.get().register(handlerName, requestHandlerInfo);
+
+ queryContext = new QueryContext(TENANT_ID);
+ }
+
+ @BeforeEach
+ public void setup() {
+ converter = new QueryRequestToPinotSQLConverter(viewDefinition);
+ connection = Mockito.mock(Connection.class);
+ Mockito.when(connection.prepareStatement(any(Request.class))).thenCallRealMethod();
+ }
+
+ @Test
+ public void testQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+
+ ColumnIdentifier tags = ColumnIdentifier.newBuilder().setColumnName("Span.tags").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(tags).build());
+
+ ColumnIdentifier requestHeaders =
+ ColumnIdentifier.newBuilder().setColumnName("Span.attributes.request_headers").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(requestHeaders).build());
+
+ Filter startTimeFilter =
+ createTimeFilter("Span.start_time_millis", Operator.GT, 1557780911508L);
+ Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1557780938419L);
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ assertPQLQuery(
+ builder.build(),
+ "select span_id, tags__keys, tags__values, request_headers__keys, request_headers__values "
+ + "from SpanEventView "
+ + "where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "and ( start_time_millis > '1557780911508' and end_time_millis < '1557780938419' )");
+ }
+
+ @Test
+ public void testQueryWithoutFilter() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+ assertPQLQuery(
+ builder.build(),
+ "Select span_id FROM SpanEventView "
+ + "where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "'");
+ }
+
+ @Test
+ public void testQuerySingleDistinctSelection() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.setDistinctSelections(true).addSelection(createColumnExpression("Span.id"));
+ assertPQLQuery(
+ builder.build(),
+ "Select distinct span_id FROM SpanEventView "
+ + "where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "'");
+ }
+
+ @Test
+ public void testQueryMultipleDistinctSelection() {
+ Builder builder = QueryRequest.newBuilder();
+ builder
+ .setDistinctSelections(true)
+ .addSelection(createColumnExpression("Span.id"))
+ .addSelection(createColumnExpression("Span.displaySpanName"))
+ .addSelection(createColumnExpression("Span.serviceName"));
+ assertPQLQuery(
+ builder.build(),
+ "Select distinct span_id, span_name, service_name FROM SpanEventView "
+ + "where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "'");
+ }
+
+ @Test
+ public void testQueryWithStringFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(createStringFilter("Span.displaySpanName", Operator.EQ, "GET /login"));
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView "
+ + "WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND span_name = 'GET /login'");
+ }
+
+ @Test
+ public void testSQLiWithStringValueFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(
+ createStringFilter("Span.displaySpanName", Operator.EQ, "GET /login' OR tenant_id = 'tenant2"));
+
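+ // Single quotes inside the literal are doubled when the PQL is generated, so the injected
+ // quote cannot terminate the string (see the expected query below).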
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND span_name = 'GET /login'' OR tenant_id = ''tenant2'");
+ }
+
+ @Test
+ public void testQueryWithBooleanFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(createBooleanFilter("Span.is_entry", Operator.EQ, true));
+
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND is_entry = 'true'");
+ }
+
+ @Test
+ public void testQueryWithDoubleFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(
+ createDoubleFilter("Span.metrics.duration_millis", Operator.EQ, 1.2));
+
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND duration_millis = 1.2");
+ }
+
+ @Test
+ public void testQueryWithFloatFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(
+ createFloatFilter("Span.metrics.duration_millis", Operator.EQ, 1.2f));
+
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND duration_millis = 1.2");
+ }
+
+ @Test
+ public void testQueryWithIntFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(createIntFilter("Span.metrics.duration_millis", Operator.EQ, 1));
+
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND duration_millis = 1");
+ }
+
+ @Test
+ public void testQueryWithTimestampFilter() {
+ QueryRequest queryRequest =
+ buildSimpleQueryWithFilter(createTimestampFilter("Span.is_entry", Operator.EQ, 123456));
+
+ assertPQLQuery(
+ queryRequest,
+ "Select span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND is_entry = 123456");
+ }
+
+ @Test
+ public void testQueryWithOrderBy() {
+ assertPQLQuery(
+ buildOrderByQuery(),
+ "Select span_id, start_time_millis, end_time_millis FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "order by start_time_millis desc , end_time_millis limit 100");
+ }
+
+ @Test
+ public void testQueryWithOrderByWithPagination() {
+ QueryRequest orderByQueryRequest = buildOrderByQuery();
+ Builder builder = QueryRequest.newBuilder(orderByQueryRequest);
+ builder.setOffset(1000);
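+ // Pinot renders pagination as "limit <offset>, <limit>", as asserted below.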
+ assertPQLQuery(
+ builder.build(),
+ "Select span_id, start_time_millis, end_time_millis FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "order by start_time_millis desc , end_time_millis limit 1000, 100");
+ }
+
+ @Test
+ public void testQueryWithGroupByWithMultipleAggregates() {
+ QueryRequest orderByQueryRequest = buildMultipleGroupByMultipleAggQuery();
+ Builder builder = QueryRequest.newBuilder(orderByQueryRequest);
+ builder.setLimit(20);
+ assertPQLQuery(
+ builder.build(),
+ "select service_name, span_name, count(*), avg(duration_millis) from SpanEventView"
+ + " where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )"
+ + " group by service_name, span_name limit 20");
+ }
+
+ @Test
+ public void testQueryWithGroupByWithMultipleAggregatesAndOrderBy() {
+ QueryRequest orderByQueryRequest = buildMultipleGroupByMultipleAggAndOrderByQuery();
+ Builder builder = QueryRequest.newBuilder(orderByQueryRequest);
+ builder.setLimit(20);
+ assertPQLQuery(
+ builder.build(),
+ "select service_name, span_name, count(*), avg(duration_millis) from SpanEventView"
+ + " where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )"
+ + " group by service_name, span_name order by service_name, avg(duration_millis) desc , count(*) desc limit 20");
+ }
+
+ @Test
+ public void testQueryWithDistinctCountAggregation() {
+ Filter startTimeFilter =
+ createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L);
+ Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L);
+ QueryRequest queryRequest =
+ QueryRequest.newBuilder()
+ .addAggregation(
+ createFunctionExpression("DISTINCTCOUNT", "Span.id", "distinctcount_span_id"))
+ .setFilter(
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build())
+ .setLimit(15)
+ .build();
+
+ assertPQLQuery(
+ queryRequest,
+ "select distinctcount(span_id) from SpanEventView"
+ + " where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )"
+ + " limit 15");
+ }
+
+ @Test
+ public void testQueryWithDistinctCountAggregationAndGroupBy() {
+ Filter startTimeFilter =
+ createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L);
+ Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L);
+ QueryRequest queryRequest =
+ QueryRequest.newBuilder()
+ .addSelection(createColumnExpression("Span.id"))
+ .addGroupBy(createColumnExpression("Span.id"))
+ .addAggregation(
+ createFunctionExpression("DISTINCTCOUNT", "Span.id", "distinctcount_span_id"))
+ .setFilter(
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build())
+ .addOrderBy(
+ createOrderByExpression(
+ createFunctionExpression("DISTINCTCOUNT", "Span.id", "distinctcount_span_id"),
+ SortOrder.ASC))
+ .setLimit(15)
+ .build();
+
+ assertPQLQuery(
+ queryRequest,
+ "select span_id, distinctcount(span_id) from SpanEventView"
+ + " where "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "and ( start_time_millis > '1570658506605' and end_time_millis < '1570744906673' )"
+ + " group by span_id order by distinctcount(span_id) limit 15");
+ }
+
+ @Test
+ public void testQueryWithStringArray() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+
+ String trace1 = "1";
+ String trace2 = "2";
+ LiteralConstant spanIds =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder()
+ .setValueType(ValueType.STRING_ARRAY)
+ .addStringArray(trace1)
+ .addStringArray(trace2)
+ .build())
+ .build();
+
+ Filter filter =
+ Filter.newBuilder()
+ .setOperator(Operator.IN)
+ .setLhs(Expression.newBuilder().setColumnIdentifier(spanId).build())
+ .setRhs(Expression.newBuilder().setLiteral(spanIds).build())
+ .build();
+
+ builder.setFilter(filter);
+
+ assertPQLQuery(
+ builder.build(),
+ "SELECT span_id FROM SpanEventView "
+ + "WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND span_id IN ('1', '2')");
+ }
+
+ @Test
+ public void testSQLiWithStringArrayFilter() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+
+ String span1 = "1') OR tenant_id = 'tenant2' and span_id IN ('1";
+ LiteralConstant spanIds =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder()
+ .setValueType(ValueType.STRING_ARRAY)
+ .addStringArray(span1)
+ .build())
+ .build();
+
+ Filter filter =
+ Filter.newBuilder()
+ .setOperator(Operator.IN)
+ .setLhs(Expression.newBuilder().setColumnIdentifier(spanId).build())
+ .setRhs(Expression.newBuilder().setLiteral(spanIds).build())
+ .build();
+
+ builder.setFilter(filter);
+ assertPQLQuery(
+ builder.build(),
+ "SELECT span_id FROM SpanEventView WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND span_id IN ('1'') OR tenant_id = ''tenant2'' and span_id IN (''1')");
+ }
+
+ @Test
+ public void testQueryWithLikeOperator() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+
+ Filter likeFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.LIKE)
+ .setLhs(Expression.newBuilder().setColumnIdentifier(spanId).build())
+ .setRhs(
+ Expression.newBuilder()
+ .setLiteral(
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString("123").build()))
+ .build())
+ .build();
+
+ builder.setFilter(likeFilter);
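+ // LIKE filters are translated to Pinot's REGEXP_LIKE, as asserted below.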
+ assertPQLQuery(
+ builder.build(),
+ "SELECT span_id FROM SpanEventView "
+ + "WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND REGEXP_LIKE(span_id,'123')");
+ }
+
+ @Test
+ public void testQueryWithContainsKeyOperator() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanTag = ColumnIdentifier.newBuilder().setColumnName("Span.tags").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanTag).build());
+
+ LiteralConstant tag =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder()
+ .setValueType(ValueType.STRING_ARRAY)
+ .addStringArray("FLAGS")
+ .addStringArray("0")
+ .build())
+ .build();
+
+ Filter likeFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.CONTAINS_KEY)
+ .setLhs(Expression.newBuilder().setColumnIdentifier(spanTag).build())
+ .setRhs(Expression.newBuilder().setLiteral(tag).build())
+ .build();
+
+ builder.setFilter(likeFilter);
+ assertPQLQuery(
+ builder.build(),
+ "SELECT tags__keys, tags__values FROM SpanEventView "
+ + "WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND tags__keys = 'flags'");
+ }
+
+ @Test
+ public void testQueryWithContainsKeyValueOperator() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanTag = ColumnIdentifier.newBuilder().setColumnName("Span.tags").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanTag).build());
+
+ LiteralConstant tag =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder()
+ .setValueType(ValueType.STRING_ARRAY)
+ .addStringArray("FLAGS")
+ .addStringArray("0")
+ .build())
+ .build();
+
+ Filter likeFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.CONTAINS_KEYVALUE)
+ .setLhs(Expression.newBuilder().setColumnIdentifier(spanTag).build())
+ .setRhs(Expression.newBuilder().setLiteral(tag).build())
+ .build();
+
+ builder.setFilter(likeFilter);
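+ // CONTAINS_KEYVALUE expands into conditions on the __keys and __values columns plus a
+ // mapvalue() lookup, as asserted below.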
+ assertPQLQuery(
+ builder.build(),
+ "SELECT tags__keys, tags__values FROM SpanEventView "
+ + "WHERE "
+ + viewDefinition.getTenantIdColumn()
+ + " = '"
+ + TENANT_ID
+ + "' "
+ + "AND tags__keys = 'flags' and tags__values = '0' and mapvalue(tags__keys,'flags',tags__values) = '0'");
+ }
+
+ private Filter createTimeFilter(String columnName, Operator op, long value) {
+ ColumnIdentifier startTimeColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(startTimeColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setString(String.valueOf(value)).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createStringFilter(String columnName, Operator op, String value) {
+ ColumnIdentifier booleanColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.STRING).setString(value).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createBooleanFilter(String columnName, Operator op, boolean value) {
+ ColumnIdentifier booleanColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.BOOL).setBoolean(value).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createTimestampFilter(String columnName, Operator op, long value) {
+ ColumnIdentifier booleanColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(
+ Value.newBuilder().setValueType(ValueType.TIMESTAMP).setTimestamp(value).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createDoubleFilter(String columnName, Operator op, double value) {
+ ColumnIdentifier booleanColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.DOUBLE).setDouble(value).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createFloatFilter(String columnName, Operator op, float value) {
+ ColumnIdentifier booleanColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.FLOAT).setFloat(value).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private Filter createIntFilter(String columnName, Operator op, int value) {
+ ColumnIdentifier booleanColumn =
+ ColumnIdentifier.newBuilder().setColumnName(columnName).build();
+ Expression lhs = Expression.newBuilder().setColumnIdentifier(booleanColumn).build();
+
+ LiteralConstant constant =
+ LiteralConstant.newBuilder()
+ .setValue(Value.newBuilder().setValueType(ValueType.INT).setInt(value).build())
+ .build();
+ Expression rhs = Expression.newBuilder().setLiteral(constant).build();
+ return Filter.newBuilder().setLhs(lhs).setOperator(op).setRhs(rhs).build();
+ }
+
+ private QueryRequest buildOrderByQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier spanId = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ ColumnIdentifier startTimeColumn =
+ ColumnIdentifier.newBuilder().setColumnName("Span.start_time_millis").build();
+ ColumnIdentifier endTimeColumn =
+ ColumnIdentifier.newBuilder().setColumnName("Span.end_time_millis").build();
+
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(spanId).build());
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(startTimeColumn).build());
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(endTimeColumn).build());
+
+ builder.addOrderBy(
+ OrderByExpression.newBuilder()
+ .setExpression(Expression.newBuilder().setColumnIdentifier(startTimeColumn).build())
+ .setOrder(SortOrder.DESC)
+ .build());
+ builder.addOrderBy(
+ OrderByExpression.newBuilder()
+ .setExpression(Expression.newBuilder().setColumnIdentifier(endTimeColumn).build())
+ .setOrder(SortOrder.ASC)
+ .build());
+
+ builder.setLimit(100);
+ return builder.build();
+ }
+
+ private QueryRequest buildMultipleGroupByMultipleAggQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("Span.id"));
+ Function.Builder avg =
+ Function.newBuilder()
+ .setFunctionName("AVG")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Span.duration_millis")));
+ builder.addAggregation(Expression.newBuilder().setFunction(avg));
+
+ Filter startTimeFilter =
+ createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L);
+ Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L);
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Span.serviceName").build()));
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Span.displaySpanName").build()));
+ return builder.build();
+ }
+
+ private QueryRequest buildMultipleGroupByMultipleAggAndOrderByQuery() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.addAggregation(QueryRequestUtil.createCountByColumnSelection("Span.id"));
+ Function.Builder avg =
+ Function.newBuilder()
+ .setFunctionName("AVG")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Span.duration_millis")));
+ builder.addAggregation(Expression.newBuilder().setFunction(avg));
+
+ Filter startTimeFilter =
+ createTimeFilter("Span.start_time_millis", Operator.GT, 1570658506605L);
+ Filter endTimeFilter = createTimeFilter("Span.end_time_millis", Operator.LT, 1570744906673L);
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Span.serviceName").build()));
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Span.displaySpanName").build()));
+
+ builder.addOrderBy(
+ createOrderByExpression(createColumnExpression("Span.serviceName"), SortOrder.ASC));
+ builder.addOrderBy(
+ createOrderByExpression(
+ createFunctionExpression("AVG", "Span.duration_millis", "avg_duration_millis"),
+ SortOrder.DESC));
+ builder.addOrderBy(
+ createOrderByExpression(
+ createFunctionExpression("COUNT", "Span.id", "count_span_id"), SortOrder.DESC));
+ return builder.build();
+ }
+
+ private QueryRequest buildSimpleQueryWithFilter(Filter filter) {
+ Builder builder = QueryRequest.newBuilder();
+ ColumnIdentifier columnName = ColumnIdentifier.newBuilder().setColumnName("Span.id").build();
+ builder.addSelection(Expression.newBuilder().setColumnIdentifier(columnName).build());
+
+ builder.setFilter(filter);
+
+ return builder.build();
+ }
+
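+ // Runs the request through the converter and a PinotClient backed by the mocked connection, then
+ // compares the captured Pinot query (case-insensitively) against the expected query string.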
+ private void assertPQLQuery(QueryRequest queryRequest, String expectedQuery) {
+ QueryRequestToPinotSQLConverter converter = new QueryRequestToPinotSQLConverter(viewDefinition);
+ Entry<String, Params> statementToParam =
+ converter.toSQL(queryContext, queryRequest, createSelectionsFromQueryRequest(queryRequest));
+ PinotClient pinotClient = new PinotClient(connection);
+ pinotClient.executeQuery(statementToParam.getKey(), statementToParam.getValue());
+ ArgumentCaptor<Request> statementCaptor = ArgumentCaptor.forClass(Request.class);
+ Mockito.verify(connection, Mockito.times(1)).execute(statementCaptor.capture());
+ Assertions.assertEquals(
+ expectedQuery.toLowerCase(), statementCaptor.getValue().getQuery().toLowerCase());
+ }
+
+ // Puts the selections into a LinkedHashSet in the same order that RequestAnalyzer does:
+ // group bys, then selections, then aggregations.
+ private LinkedHashSet<Expression> createSelectionsFromQueryRequest(QueryRequest queryRequest) {
+ LinkedHashSet<Expression> selections = new LinkedHashSet<>();
+
+ selections.addAll(queryRequest.getGroupByList());
+ selections.addAll(queryRequest.getSelectionList());
+ selections.addAll(queryRequest.getAggregationList());
+
+ return selections;
+ }
+}
diff --git a/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java
new file mode 100644
index 00000000..0b75ea7d
--- /dev/null
+++ b/query-service-impl/src/test/java/org/hypertrace/core/query/service/pinot/RequestAnalyzerTest.java
@@ -0,0 +1,315 @@
+package org.hypertrace.core.query.service.pinot;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+import com.google.common.collect.ImmutableSet;
+import java.util.Iterator;
+import java.util.Set;
+import org.hypertrace.core.query.service.RequestAnalyzer;
+import org.hypertrace.core.query.service.api.ColumnIdentifier;
+import org.hypertrace.core.query.service.api.Expression;
+import org.hypertrace.core.query.service.api.Filter;
+import org.hypertrace.core.query.service.api.Function;
+import org.hypertrace.core.query.service.api.LiteralConstant;
+import org.hypertrace.core.query.service.api.Operator;
+import org.hypertrace.core.query.service.api.QueryRequest;
+import org.hypertrace.core.query.service.api.QueryRequest.Builder;
+import org.hypertrace.core.query.service.api.ResultSetMetadata;
+import org.hypertrace.core.query.service.api.Value;
+import org.hypertrace.core.query.service.util.QueryRequestUtil;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RequestAnalyzerTest {
+ private static final Logger LOGGER = LoggerFactory.getLogger(RequestAnalyzerTest.class);
+
+ @Test
+ public void testRepeatedColumns() {
+ Builder builder = QueryRequest.newBuilder();
+ // agg function with alias
+ Function count =
+ Function.newBuilder()
+ .setFunctionName("Count")
+ .setAlias("myCountAlias")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")))
+ .build();
+ builder.addAggregation(Expression.newBuilder().setFunction(count));
+
+ // agg function without alias
+ Function minFunction =
+ Function.newBuilder()
+ .setFunctionName("MIN")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.duration")))
+ .build();
+ builder.addAggregation(Expression.newBuilder().setFunction(minFunction));
+
+ builder.addSelection(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
+
+ builder.addSelection(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
+
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
+ QueryRequest queryRequest = builder.build();
+
+ RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
+ analyzer.analyze();
+ ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata();
+ LOGGER.info("resultSetMetadata = " + resultSetMetadata);
+
+ assertNotNull(resultSetMetadata);
+ assertEquals(3, resultSetMetadata.getColumnMetadataCount());
+ assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(0).getColumnName());
+ assertEquals("myCountAlias", resultSetMetadata.getColumnMetadata(1).getColumnName());
+ assertEquals("MIN", resultSetMetadata.getColumnMetadata(2).getColumnName());
+
+ // Selections should correspond in size and order to the
+ // resultSetMetadata.getColumnMetadataList()
+ assertEquals(3, analyzer.getAllSelections().size());
+ Iterator<Expression> selectionsIterator = analyzer.getAllSelections().iterator();
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))
+ .build(),
+ selectionsIterator.next());
+ assertEquals(Expression.newBuilder().setFunction(count).build(), selectionsIterator.next());
+ assertEquals(
+ Expression.newBuilder().setFunction(minFunction).build(), selectionsIterator.next());
+ }
+
+ @Test
+ public void testFiltersWithLiterals() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.addSelection(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
+ Expression expression =
+ Expression.newBuilder()
+ .setLiteral(LiteralConstant.newBuilder().setValue(Value.newBuilder().setString("test")))
+ .build();
+ builder.setFilter(
+ Filter.newBuilder()
+ .setLhs(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")))
+ .setRhs(expression)
+ .setOperator(Operator.EQ));
+
+ QueryRequest queryRequest = builder.build();
+
+ RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
+ analyzer.analyze();
+ ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata();
+ LOGGER.info("resultSetMetadata = " + resultSetMetadata);
+
+ assertNotNull(resultSetMetadata);
+ assertEquals(1, resultSetMetadata.getColumnMetadataCount());
+ assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(0).getColumnName());
+
+ // Selections should correspond in size and order to the
+ // resultSetMetadata.getColumnMetadataList()
+ assertEquals(1, analyzer.getAllSelections().size());
+ Iterator<Expression> selectionsIterator = analyzer.getAllSelections().iterator();
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))
+ .build(),
+ selectionsIterator.next());
+ }
+
+ @Test
+ public void testReferencedColumns() {
+ Builder builder = QueryRequest.newBuilder();
+ builder.addSelection(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
+ Expression expression =
+ Expression.newBuilder()
+ .setLiteral(LiteralConstant.newBuilder().setValue(Value.newBuilder().setString("test")))
+ .build();
+ Filter.Builder idFilter =
+ Filter.newBuilder()
+ .setLhs(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")))
+ .setRhs(expression)
+ .setOperator(Operator.EQ);
+ Filter startTimeFilter =
+ QueryRequestUtil.createTimeFilter(
+ "Trace.start_time_millis",
+ Operator.GT,
+ System.currentTimeMillis() - 1000 * 60 * 60 * 24);
+ Filter endTimeFilter =
+ QueryRequestUtil.createTimeFilter(
+ "Trace.end_time_millis", Operator.LT, System.currentTimeMillis());
+
+ Filter andFilter =
+ Filter.newBuilder()
+ .setOperator(Operator.AND)
+ .addChildFilter(startTimeFilter)
+ .addChildFilter(endTimeFilter)
+ .addChildFilter(idFilter)
+ .build();
+ builder.setFilter(andFilter);
+
+ QueryRequest queryRequest = builder.build();
+
+ RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
+ analyzer.analyze();
+
+ Set<String> selectedColumns = analyzer.getSelectedColumns();
+ assertNotNull(selectedColumns);
+ assertEquals(1, selectedColumns.size());
+ assertEquals("Trace.transaction_name", selectedColumns.iterator().next());
+
+ Set<String> referencedColumns = analyzer.getReferencedColumns();
+ assertNotNull(referencedColumns);
+ assertEquals(4, referencedColumns.size());
+ assertEquals(
+ ImmutableSet.of(
+ "Trace.transaction_name",
+ "Trace.id",
+ "Trace.start_time_millis",
+ "Trace.end_time_millis"),
+ referencedColumns);
+
+ ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata();
+ assertNotNull(resultSetMetadata);
+ assertEquals(1, resultSetMetadata.getColumnMetadataCount());
+ assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(0).getColumnName());
+
+ // Selections should correspond in size and order to the
+ // resultSetMetadata.getColumnMetadataList()
+ assertEquals(1, analyzer.getAllSelections().size());
+ Iterator<Expression> selectionsIterator = analyzer.getAllSelections().iterator();
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))
+ .build(),
+ selectionsIterator.next());
+ }
+
+ @Test
+ public void testSelectionsLinkedHashSet() {
+ Builder builder = QueryRequest.newBuilder();
+ // agg function with alias
+ Function count =
+ Function.newBuilder()
+ .setFunctionName("Count")
+ .setAlias("myCountAlias")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")))
+ .build();
+ builder.addAggregation(Expression.newBuilder().setFunction(count));
+
+ // agg function without alias
+ Function minFunction =
+ Function.newBuilder()
+ .setFunctionName("MIN")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.duration")))
+ .build();
+ builder.addAggregation(Expression.newBuilder().setFunction(minFunction));
+
+ // Add some selections
+ builder.addSelection(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name")));
+ builder.addSelection(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id")));
+
+ // A function added to the selections list is treated as a selection
+ Function avg =
+ Function.newBuilder()
+ .setFunctionName("AVG")
+ .setAlias("myAvgAlias")
+ .addArguments(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.duration")))
+ .build();
+ builder.addSelection(Expression.newBuilder().setFunction(avg));
+
+ // Add some group bys
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.api_name")));
+ builder.addGroupBy(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.service_name")));
+ QueryRequest queryRequest = builder.build();
+
+ RequestAnalyzer analyzer = new RequestAnalyzer(queryRequest);
+ analyzer.analyze();
+
+ // The order in resultSetMetadata.getColumnMetadataList() and selections is group bys,
+ // selections then aggregations
+ ResultSetMetadata resultSetMetadata = analyzer.getResultSetMetadata();
+
+ assertNotNull(resultSetMetadata);
+ assertEquals(7, resultSetMetadata.getColumnMetadataCount());
+ assertEquals("Trace.api_name", resultSetMetadata.getColumnMetadata(0).getColumnName());
+ assertEquals("Trace.service_name", resultSetMetadata.getColumnMetadata(1).getColumnName());
+ assertEquals("Trace.transaction_name", resultSetMetadata.getColumnMetadata(2).getColumnName());
+ assertEquals("Trace.id", resultSetMetadata.getColumnMetadata(3).getColumnName());
+ assertEquals("myAvgAlias", resultSetMetadata.getColumnMetadata(4).getColumnName());
+ assertEquals("myCountAlias", resultSetMetadata.getColumnMetadata(5).getColumnName());
+ assertEquals("MIN", resultSetMetadata.getColumnMetadata(6).getColumnName());
+
+ // Selections should correspond in size and order to the
+ // resultSetMetadata.getColumnMetadataList()
+ assertEquals(7, analyzer.getAllSelections().size());
+ Iterator<Expression> selectionsIterator = analyzer.getAllSelections().iterator();
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.api_name"))
+ .build(),
+ selectionsIterator.next());
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.service_name"))
+ .build(),
+ selectionsIterator.next());
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(
+ ColumnIdentifier.newBuilder().setColumnName("Trace.transaction_name"))
+ .build(),
+ selectionsIterator.next());
+ assertEquals(
+ Expression.newBuilder()
+ .setColumnIdentifier(ColumnIdentifier.newBuilder().setColumnName("Trace.id"))
+ .build(),
+ selectionsIterator.next());
+ assertEquals(Expression.newBuilder().setFunction(avg).build(), selectionsIterator.next());
+ assertEquals(Expression.newBuilder().setFunction(count).build(), selectionsIterator.next());
+ assertEquals(
+ Expression.newBuilder().setFunction(minFunction).build(), selectionsIterator.next());
+ }
+}
diff --git a/query-service-impl/src/test/resources/application.conf b/query-service-impl/src/test/resources/application.conf
new file mode 100644
index 00000000..814ac5bc
--- /dev/null
+++ b/query-service-impl/src/test/resources/application.conf
@@ -0,0 +1,66 @@
+service.name = "query-service"
+service.port = 8090
+service.admin.port = 8091
+service.config = {
+ tenantColumnName = "tenant_id"
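+ # Two Pinot client configs; each request handler below selects one via clientConfig, matching the client's type.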
+ clients = [
+ {
+ type = broker
+ connectionString = "pinotCluster0:8099"
+ }
+ {
+ type = zookeeper
+ connectionString = "pinotCluster1:2181"
+ }
+ ]
+ queryRequestHandlersConfig = [
+ {
+ name = pinotCluster0
+ type = pinot
+ clientConfig = broker
+ requestHandlerInfo = {
+ viewDefinition = {
+ viewName = RawTraceView
+ mapFields = ["tags"]
+ fieldMap = {
+ "Trace.id": "trace_id",
+ "Trace.attributes.services": "services",
+ "Trace.start_time_millis": "start_time_millis",
+ "Trace.end_time_millis": "end_time_millis",
+ "Trace.duration_millis": "duration_millis",
+ "Trace.metrics.num_services": "num_services",
+ "Trace.metrics.num_spans": "num_spans",
+ "Trace.attributes": "attributes",
+ "Trace.metrics": "metrics"
+ "Trace.tags": "tags"
+ }
+ }
+ }
+ }
+ {
+ name = span-event-view-handler
+ type = pinot
+ clientConfig = zookeeper
+ requestHandlerInfo = {
+ viewDefinition = {
+ viewName = spanEventView
+ mapFields = ["tags"]
+ fieldMap = {
+ "EVENT.serviceName": "service_name",
+ "EVENT.id": "span_id",
+ "EVENT.startTime": "start_time_millis",
+ "EVENT.endTime": "end_time_millis",
+ "EVENT.traceId": "trace_id",
+ "EVENT.parentSpanId": "parent_span_id",
+ "EVENT.type": "span_kind",
+ "EVENT.statusCode": "status_code",
+ "EVENT.spanTags": "tags"
+ "EVENT.spanRequestUrl": "request_url",
+ "EVENT.duration": "duration_millis",
+ "EVENT.displaySpanName": "display_span_name"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/query-service-impl/src/test/resources/log4j2.properties b/query-service-impl/src/test/resources/log4j2.properties
new file mode 100644
index 00000000..62c371c3
--- /dev/null
+++ b/query-service-impl/src/test/resources/log4j2.properties
@@ -0,0 +1,8 @@
+status=error
+name=PropertiesConfig
+appender.console.type=Console
+appender.console.name=STDOUT
+appender.console.layout.type=PatternLayout
+appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n
+rootLogger.level=INFO
+rootLogger.appenderRef.stdout.ref=STDOUT
diff --git a/query-service/build.gradle.kts b/query-service/build.gradle.kts
new file mode 100644
index 00000000..f960e23f
--- /dev/null
+++ b/query-service/build.gradle.kts
@@ -0,0 +1,27 @@
+plugins {
+ java
+ application
+ id("org.hypertrace.docker-java-application-plugin") version "0.2.2"
+ id("org.hypertrace.docker-publish-plugin") version "0.2.2"
+}
+
+dependencies {
+ implementation(project(":query-service-impl"))
+ implementation("org.hypertrace.core.grpcutils:grpc-server-utils:0.1.0")
+ implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.2")
+ implementation("io.grpc:grpc-netty:1.30.2")
+
+ implementation("org.slf4j:slf4j-api:1.7.30")
+ runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:2.13.3")
+
+ implementation("com.typesafe:config:1.3.2")
+}
+
+application {
+ mainClassName = "org.hypertrace.core.serviceframework.PlatformServiceLauncher"
+}
+
+// Configures the Gradle run task so this service can be run locally: execute ./gradlew run from IntelliJ or the console.
+tasks.run {
+ jvmArgs = listOf("-Dbootstrap.config.uri=file:${projectDir}/src/main/resources/configs", "-Dservice.name=${project.name}")
+}
diff --git a/query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java b/query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java
new file mode 100644
index 00000000..fbebfb8d
--- /dev/null
+++ b/query-service/src/main/java/org/hypertrace/core/query/service/QueryServiceStarter.java
@@ -0,0 +1,83 @@
+package org.hypertrace.core.query.service;
+
+import io.grpc.Server;
+import io.grpc.ServerBuilder;
+import java.io.IOException;
+import org.hypertrace.core.grpcutils.server.InterceptorUtil;
+import org.hypertrace.core.serviceframework.PlatformService;
+import org.hypertrace.core.serviceframework.config.ConfigClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class QueryServiceStarter extends PlatformService {
+ private static final String SERVICE_NAME_CONFIG = "service.name";
+ private static final String SERVICE_PORT_CONFIG = "service.port";
+ private static final String QUERY_SERVICE_CONFIG = "service.config";
+ private static final Logger LOG = LoggerFactory.getLogger(QueryServiceStarter.class);
+ private String serviceName;
+ private int serverPort;
+ private Server queryServiceServer;
+
+ public QueryServiceStarter(ConfigClient configClient) {
+ super(configClient);
+ }
+
+ @Override
+ protected void doInit() {
+ this.serviceName = getAppConfig().getString(SERVICE_NAME_CONFIG);
+ this.serverPort = getAppConfig().getInt(SERVICE_PORT_CONFIG);
+
+ final QueryServiceImplConfig queryServiceImplConfig =
+ QueryServiceImplConfig.parse(getAppConfig().getConfig(QUERY_SERVICE_CONFIG));
+
+ LOG.info("Creating the Query Service Server on port {}", serverPort);
+
+ queryServiceServer =
+ ServerBuilder.forPort(serverPort)
+ .addService(
+ InterceptorUtil.wrapInterceptors(new QueryServiceImpl(queryServiceImplConfig)))
+ .build();
+ }
+
+ @Override
+ protected void doStart() {
+ LOG.info("Attempting to start Query Service on port {}", serverPort);
+
+ try {
+ queryServiceServer.start();
+ LOG.info("Started Query Service on port {}", serverPort);
+ } catch (IOException e) {
+ LOG.error("Unable to start the Query Service");
+ throw new RuntimeException(e);
+ }
+
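+ // Block this thread until the gRPC server terminates.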
+ try {
+ queryServiceServer.awaitTermination();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ protected void doStop() {
+ LOG.info("Shutting down service: {}", serviceName);
+ while (!queryServiceServer.isShutdown()) {
+ queryServiceServer.shutdown();
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException ignore) {
+ }
+ }
+ }
+
+ @Override
+ public boolean healthCheck() {
+ return true;
+ }
+
+ @Override
+ public String getServiceName() {
+ return serviceName;
+ }
+}
diff --git a/query-service/src/main/resources/banner.txt b/query-service/src/main/resources/banner.txt
new file mode 100644
index 00000000..d1506ae2
--- /dev/null
+++ b/query-service/src/main/resources/banner.txt
@@ -0,0 +1,6 @@
+================================================================================
+
+ Query
+
+================================================================================
+
diff --git a/query-service/src/main/resources/configs/common/application.conf b/query-service/src/main/resources/configs/common/application.conf
new file mode 100644
index 00000000..82f29e18
--- /dev/null
+++ b/query-service/src/main/resources/configs/common/application.conf
@@ -0,0 +1,61 @@
+main.class = org.hypertrace.core.query.service.QueryServiceStarter
+service.name = query-service
+service.port = 8090
+service.admin.port = 8091
+service.config = {
+ clients = [
+ {
+ type = zookeeper
+ connectionString = "localhost:2181/pinot/org-views"
+ }
+ ]
+ queryRequestHandlersConfig = [
+ # Update runtime configuration in helm/values.yaml. Only local test/debug needs the following
+ {
+ name = trace-view-handler
+ type = pinot
+ clientConfig = zookeeper
+ requestHandlerInfo = {
+ viewDefinition = {
+ viewName = rawTraceView
+ fieldMap = {
+ "TRACE.id": "trace_id",
+ "TRACE.startTime": "start_time_millis",
+ "TRACE.endTime": "end_time_millis",
+ "TRACE.duration": "duration_millis",
+ "TRACE.numServices": "num_services",
+ "TRACE.numSpans": "num_spans"
+ }
+ }
+ }
+ }
+ {
+ name = span-event-view-handler
+ type = pinot
+ clientConfig = zookeeper
+ requestHandlerInfo = {
+ viewDefinition = {
+ viewName = spanEventView
+ mapFields = ["tags"]
+ fieldMap = {
+ "EVENT.serviceName": "service_name",
+ "EVENT.id": "span_id",
+ "EVENT.startTime": "start_time_millis",
+ "EVENT.endTime": "end_time_millis",
+ "EVENT.traceId": "trace_id",
+ "EVENT.parentSpanId": "parent_span_id",
+ "EVENT.type": "span_kind",
+ "EVENT.statusCode": "status_code",
+ "EVENT.spanTags": "tags"
+ "EVENT.spanRequestUrl": "request_url",
+ "EVENT.duration": "duration_millis",
+ "EVENT.displaySpanName": "display_span_name",
+ }
+ }
+ }
+ }
+ ]
+}
+
+metrics.reporter.names = ["prometheus"]
+metrics.reporter.console.reportInterval = 30
diff --git a/query-service/src/main/resources/log4j2.properties b/query-service/src/main/resources/log4j2.properties
new file mode 100644
index 00000000..d91bc7bf
--- /dev/null
+++ b/query-service/src/main/resources/log4j2.properties
@@ -0,0 +1,23 @@
+status=error
+name=PropertiesConfig
+appender.console.type=Console
+appender.console.name=STDOUT
+appender.console.layout.type=PatternLayout
+appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n
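+# Rolling file appender: rolls every 3600 seconds (hourly, since the finest unit in filePattern is seconds) or at 20MB, keeping at most 5 archives.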
+appender.rolling.type=RollingFile
+appender.rolling.name=ROLLING_FILE
+appender.rolling.fileName=${sys:service.name:-service}.log
+appender.rolling.filePattern=${sys:service.name:-service}-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz
+appender.rolling.layout.type=PatternLayout
+appender.rolling.layout.pattern=%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %c{1.} - %msg%n
+appender.rolling.policies.type=Policies
+appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval=3600
+appender.rolling.policies.time.modulate=true
+appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size=20MB
+appender.rolling.strategy.type=DefaultRolloverStrategy
+appender.rolling.strategy.max=5
+rootLogger.level=INFO
+rootLogger.appenderRef.stdout.ref=STDOUT
+rootLogger.appenderRef.rolling.ref=ROLLING_FILE
diff --git a/semantic-build-versioning.gradle b/semantic-build-versioning.gradle
new file mode 100644
index 00000000..9bc16767
--- /dev/null
+++ b/semantic-build-versioning.gradle
@@ -0,0 +1,11 @@
+// Follows https://www.conventionalcommits.org/en/v1.0.0/#summary with one change: any commit is treated as a release,
+// patch being the default if major or minor is not detected.
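+// Illustrative examples: "feat!: drop legacy handler config" or a line starting with "BREAKING CHANGE:" bumps major,
+// "feat(pinot): support order by" bumps minor, and anything else (e.g. "fix: handle null filter") bumps patch.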
+
+autobump {
+ // match any message starting with a type/scope suffixed with !, or with a line starting with "BREAKING CHANGE:"
+ majorPattern = ~/(?m)(\A[^:]+(?<=!): |^BREAKING CHANGE:)/
+ // match any commit message starting with "feat: " or "feat(any scope): "
+ minorPattern = ~/^feat(\([^)]+\))?: /
+ newPreReleasePattern = null // Not used - no prereleases
+ promoteToReleasePattern = null // Not used - every merge is a release
+}
\ No newline at end of file
diff --git a/settings.gradle.kts b/settings.gradle.kts
new file mode 100644
index 00000000..cce8556b
--- /dev/null
+++ b/settings.gradle.kts
@@ -0,0 +1,18 @@
+rootProject.name = "query-service"
+
+pluginManagement {
+ repositories {
+ mavenLocal()
+ gradlePluginPortal()
+ maven("https://dl.bintray.com/hypertrace/maven")
+ }
+}
+
+plugins {
+ id("org.hypertrace.version-settings") version "0.1.1"
+}
+
+include(":query-service-api")
+include(":query-service-client")
+include(":query-service-impl")
+include(":query-service")