diff --git a/.github/workflows/sql-jdbc-release-workflow.yml b/.github/workflows/sql-jdbc-release-workflow.yml index dba1f7617a..b70e590484 100644 --- a/.github/workflows/sql-jdbc-release-workflow.yml +++ b/.github/workflows/sql-jdbc-release-workflow.yml @@ -9,7 +9,7 @@ jobs: Release-SQL-JDBC: strategy: matrix: - java: [14] + java: [10] name: Build and Release JDBC Plugin runs-on: ubuntu-latest diff --git a/.github/workflows/sql-odbc-rename-and-release-workflow.yml b/.github/workflows/sql-odbc-rename-and-release-workflow.yml new file mode 100644 index 0000000000..3eb440ad61 --- /dev/null +++ b/.github/workflows/sql-odbc-rename-and-release-workflow.yml @@ -0,0 +1,54 @@ +name: Rename and release ODBC + +# This workflow will rename previous artifacts of odbc and upload to s3, triggered by tag "rename*" + +on: + push: + tags: + - rename* + +env: + OD_VERSION: 1.12.0.0 + +jobs: + upload-odbc: + runs-on: ubuntu-latest + + name: Upload ODBC to S3 + steps: + - name: Configure AWS CLI + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Rename and upload for MacOS + run: | + mkdir macos + cd macos + aws s3 cp "s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/mac/Open Distro for Elasticsearch SQL ODBC Driver 64-bit-1.11.0.0-Darwin.pkg" "Open Distro for Elasticsearch SQL ODBC Driver 64-bit-${{ env.OD_VERSION }}-Darwin.pkg" + mac_installer=`ls -1t *.pkg | grep "Open Distro for Elasticsearch SQL ODBC Driver" | head -1` + echo $mac_installer + aws s3 cp "$mac_installer" s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/mac/ + cd .. + + - name: Rename and upload for win32 + run: | + mkdir win32 + cd win32 + aws s3 cp "s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/windows/Open Distro for Elasticsearch SQL ODBC Driver 32-bit-1.11.0.0-Windows.msi" "Open Distro for Elasticsearch SQL ODBC Driver 32-bit-${{ env.OD_VERSION }}-Windows.msi" + windows_installer=`ls -1t *.msi | grep "Open Distro for Elasticsearch SQL ODBC Driver" | head -1` + echo $windows_installer + aws s3 cp "$windows_installer" s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/windows/ + cd .. 
+
+      - name: Rename and upload for win64
+        run: |
+          mkdir win64
+          cd win64
+          aws s3 cp "s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/windows/Open Distro for Elasticsearch SQL ODBC Driver 64-bit-1.11.0.0-Windows.msi" "Open Distro for Elasticsearch SQL ODBC Driver 64-bit-${{ env.OD_VERSION }}-Windows.msi"
+          windows_installer=`ls -1t *.msi | grep "Open Distro for Elasticsearch SQL ODBC Driver" | head -1`
+          echo $windows_installer
+          aws s3 cp "$windows_installer" s3://artifacts.opendistroforelasticsearch.amazon.com/downloads/elasticsearch-clients/opendistro-sql-odbc/windows/
+
diff --git a/.github/workflows/sql-workbench-release-workflow.yml b/.github/workflows/sql-workbench-release-workflow.yml
index 081edceeec..ef1d6bdcd2 100644
--- a/.github/workflows/sql-workbench-release-workflow.yml
+++ b/.github/workflows/sql-workbench-release-workflow.yml
@@ -42,7 +42,6 @@ jobs:
 
       - name: Move Workbench to Plugins Dir
         run: |
-          mkdir kibana/plugins
           mv workbench kibana/plugins
 
       - name: Kibana Plugin Bootstrap
diff --git a/docs/category.json b/docs/category.json
index d740d515e2..92a6df428f 100644
--- a/docs/category.json
+++ b/docs/category.json
@@ -12,7 +12,8 @@
     "experiment/ppl/cmd/search.rst",
     "experiment/ppl/cmd/sort.rst",
     "experiment/ppl/cmd/stats.rst",
-    "experiment/ppl/cmd/where.rst"
+    "experiment/ppl/cmd/where.rst",
+    "experiment/ppl/general/identifiers.rst"
   ],
   "sql_cli": [
     "user/dql/expressions.rst",
diff --git a/docs/experiment/ppl/general/datatypes.rst b/docs/experiment/ppl/general/datatypes.rst
new file mode 100644
index 0000000000..4682d61452
--- /dev/null
+++ b/docs/experiment/ppl/general/datatypes.rst
@@ -0,0 +1,226 @@
+
+==========
+Data Types
+==========
+
+.. rubric:: Table of contents
+
+.. contents::
+   :local:
+   :depth: 2
+
+
+Overview
+========
+
+PPL Data Types
+-------------------
+
+PPL supports the following data types.
+
++---------------+
+| PPL Data Type |
++===============+
+| boolean       |
++---------------+
+| byte          |
++---------------+
+| short         |
++---------------+
+| integer       |
++---------------+
+| long          |
++---------------+
+| float         |
++---------------+
+| double        |
++---------------+
+| string        |
++---------------+
+| text          |
++---------------+
+| timestamp     |
++---------------+
+| datetime      |
++---------------+
+| date          |
++---------------+
+| time          |
++---------------+
+| interval      |
++---------------+
+| ip            |
++---------------+
+| geo_point     |
++---------------+
+| binary        |
++---------------+
+| struct        |
++---------------+
+| array         |
++---------------+
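+
+These types surface in the ``schema`` section of query responses. As a quick, hedged illustration borrowed from the new ``JdbcFormatIT`` integration test added in this change (the ``bank`` index and its fields are that test's fixtures, and the query uses SQL rather than PPL syntax)::
+
+    od> SELECT account_number, address, age, birthdate, city, male, state FROM bank;
+    -- JDBC schema types: long, text, integer, timestamp, keyword, boolean, text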
+
+Data Types Mapping
+------------------
+
+The table below lists the mapping between Elasticsearch data types, PPL data types and SQL types.
+
++--------------------+---------------+-----------+
+| Elasticsearch Type | PPL Type      | SQL Type  |
++====================+===============+===========+
+| boolean            | boolean       | BOOLEAN   |
++--------------------+---------------+-----------+
+| byte               | byte          | TINYINT   |
++--------------------+---------------+-----------+
+| short              | short         | SMALLINT  |
++--------------------+---------------+-----------+
+| integer            | integer       | INTEGER   |
++--------------------+---------------+-----------+
+| long               | long          | BIGINT    |
++--------------------+---------------+-----------+
+| float              | float         | REAL      |
++--------------------+---------------+-----------+
+| half_float         | float         | FLOAT     |
++--------------------+---------------+-----------+
+| scaled_float       | float         | DOUBLE    |
++--------------------+---------------+-----------+
+| double             | double        | DOUBLE    |
++--------------------+---------------+-----------+
+| keyword            | string        | VARCHAR   |
++--------------------+---------------+-----------+
+| text               | text          | VARCHAR   |
++--------------------+---------------+-----------+
+| date               | timestamp     | TIMESTAMP |
++--------------------+---------------+-----------+
+| ip                 | ip            | VARCHAR   |
++--------------------+---------------+-----------+
+| binary             | binary        | VARBINARY |
++--------------------+---------------+-----------+
+| object             | struct        | STRUCT    |
++--------------------+---------------+-----------+
+| nested             | array         | STRUCT    |
++--------------------+---------------+-----------+
+
+Note: not every PPL type has a corresponding Elasticsearch type, for example date and time. To use a function that requires such a data type, the user should convert the value to that type explicitly.
+
+
+
+Numeric Data Types
+==================
+
+TODO
+
+
+Date and Time Data Types
+========================
+
+The date and time data types represent temporal values, and the PPL plugin supports the types DATE, TIME, DATETIME, TIMESTAMP and INTERVAL. By default, the Elasticsearch DSL uses the date type as its only date-and-time-related type, which contains all the information of an absolute time point. To integrate with the PPL language, each of the types other than timestamp holds only part of the temporal or timezone information, and this explicit distinction between the date and time types is reflected in the datetime functions (see `Functions `_ for details), where some functions restrict the type of the input arguments.
+
+
+Date
+----
+
+Date represents the calendar date regardless of the time zone. A given date value is a 24-hour period, that is, a day, but this period varies across timezones and might have flexible hours during daylight saving time. The date type does not contain time-of-day information. The supported range is '0001-01-01' to '9999-12-31'.
+
++------+--------------+------------------------------+
+| Type | Syntax       | Range                        |
++======+==============+==============================+
+| Date | 'yyyy-MM-dd' | '0001-01-01' to '9999-12-31' |
++------+--------------+------------------------------+
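+
+As a minimal sketch of a date value in use, the datetime function tests added in this change exercise the following (shown in SQL syntax; an equivalent PPL ``eval`` form is assumed rather than verified here)::
+
+    od> select dayname(date('2020-09-16'));
+    -- returns 'Wednesday', reported with schema type keyword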
+
+
+Time
+----
+
+Time represents the time on the clock or watch with no regard for the timezone it might be associated with. Time type data does not carry date information.
+
++------+-----------------------+----------------------------------------+
+| Type | Syntax                | Range                                  |
++======+=======================+========================================+
+| Time | 'hh:mm:ss[.fraction]' | '00:00:00.000000' to '23:59:59.999999' |
++------+-----------------------+----------------------------------------+
+
+
+Datetime
+--------
+
+Datetime type is the combination of date and time. The conversion rule of date or time to datetime is described in `Conversion between date and time types`_. Datetime type does not contain timezone information. For an absolute time point that carries both datetime and timezone information, see `Timestamp`_.
+
++----------+----------------------------------+--------------------------------------------------------------+
+| Type     | Syntax                           | Range                                                        |
++==========+==================================+==============================================================+
+| Datetime | 'yyyy-MM-dd hh:mm:ss[.fraction]' | '0001-01-01 00:00:00.000000' to '9999-12-31 23:59:59.999999' |
++----------+----------------------------------+--------------------------------------------------------------+
+
+
+
+Timestamp
+---------
+
+A timestamp instance is an absolute instant independent of timezone or convention. For example, the same timestamp rendered in another timezone displays a different value accordingly. Besides, the storage of the timestamp type also differs from the other types: a timestamp is converted from the session timezone to UTC for storage, and converted back from UTC to the session timezone on retrieval.
+
++-----------+----------------------------------+------------------------------------------------------------------+
+| Type      | Syntax                           | Range                                                            |
++===========+==================================+==================================================================+
+| Timestamp | 'yyyy-MM-dd hh:mm:ss[.fraction]' | '0001-01-01 00:00:01.000000' UTC to '9999-12-31 23:59:59.999999' |
++-----------+----------------------------------+------------------------------------------------------------------+
+
+
+Interval
+--------
+
+Interval data type represents a temporal duration or a period. The syntax is as follows:
+
++----------+--------------------+
+| Type     | Syntax             |
++==========+====================+
+| Interval | INTERVAL expr unit |
++----------+--------------------+
+
+The expr is any expression that eventually evaluates to a quantity value, see `Expressions `_ for details. The unit represents the unit for interpreting the quantity, and is one of MICROSECOND, SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, QUARTER and YEAR. The INTERVAL keyword and the unit specifier are not case sensitive. Note that there are two classes of intervals: year-week intervals, which can store years, quarters, months and weeks, and day-time intervals, which can store days, hours, minutes, seconds and microseconds. An interval is comparable only with intervals of its own class.
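+
+For illustration, interval literals of both classes follow the same ``INTERVAL expr unit`` shape (syntax sketches only, not verified queries)::
+
+    INTERVAL 2 QUARTER     -- year-week class: years, quarters, months, weeks
+    INTERVAL 30 MINUTE     -- day-time class: days, hours, minutes, seconds, microseconds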
+
+
+Conversion between date and time types
+--------------------------------------
+
+The date and time types, except interval, can basically be converted to each other, though the conversion might alter the value or lose some information, for example when extracting the time value from a datetime value, or converting a date value to a datetime value, and so forth. The summary of the conversion rules that the PPL plugin supports for each of the types is listed here:
+
+Conversion from DATE
+>>>>>>>>>>>>>>>>>>>>
+
+- Since the date value does not have any time information, conversion to the `Time`_ type is not useful, and will always return a zero time value '00:00:00'.
+
+- Conversion from date to datetime has a data fill-up due to the lack of time information: it attaches the time '00:00:00' to the original date by default and forms a datetime instance. For example, the result of converting date '2020-08-17' to datetime type is datetime '2020-08-17 00:00:00'.
+
+- Conversion to timestamp fills up both the time value and the timezone information: it attaches the zero time value '00:00:00' and the session timezone (UTC by default) to the date. For example, the result of converting date '2020-08-17' to timestamp type with session timezone UTC is timestamp '2020-08-17 00:00:00' UTC.
+
+
+Conversion from TIME
+>>>>>>>>>>>>>>>>>>>>
+
+- A time value cannot be converted to any other date and time type since it does not contain any date information; it is not meaningful to build a date/datetime/timestamp instance with no date info.
+
+
+Conversion from DATETIME
+>>>>>>>>>>>>>>>>>>>>>>>>
+
+- Conversion from datetime to date extracts the date part from the datetime value. For example, the result of converting datetime '2020-08-17 14:09:00' to date is date '2020-08-17'.
+
+- Conversion to time extracts the time part from the datetime value. For example, the result of converting datetime '2020-08-17 14:09:00' to time is time '14:09:00'.
+
+- Since the datetime type does not contain timezone information, conversion to timestamp fills up the timezone part with the session timezone. For example, the result of converting datetime '2020-08-17 14:09:00', with system timezone UTC, to timestamp is timestamp '2020-08-17 14:09:00' UTC.
+
+
+Conversion from TIMESTAMP
+>>>>>>>>>>>>>>>>>>>>>>>>>
+
+- Conversion from timestamp is much more straightforward. Converting it to date extracts the date value, and converting it to time extracts the time value. Conversion to datetime extracts the datetime value and leaves the timezone information behind. For example, the result of converting timestamp '2020-08-17 14:09:00' UTC to date is date '2020-08-17', to time is '14:09:00', and to datetime is datetime '2020-08-17 14:09:00'.
+
+
+String Data Types
+=================
+
+TODO
+
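+Although this section is still TODO, one behavior is already pinned down by the integration tests in this change: the new engine reports string literals and string-function results as ``keyword`` in the JDBC schema, while ``text`` fields keep the ``text`` type. A sketch mirroring ``TextFunctionIT``::
+
+    od> select upper('hello');
+    -- value 'HELLO', reported with schema type keyword
+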
diff --git a/docs/experiment/ppl/general/identifiers.rst b/docs/experiment/ppl/general/identifiers.rst
new file mode 100644
index 0000000000..f14a1c55b9
--- /dev/null
+++ b/docs/experiment/ppl/general/identifiers.rst
@@ -0,0 +1,93 @@
+===========
+Identifiers
+===========
+
+.. rubric:: Table of contents
+
+.. contents::
+   :local:
+   :depth: 2
+
+
+Introduction
+============
+
+Identifiers are used for naming your database objects, such as index names, field names, aliases, etc. Basically there are two types of identifiers: regular identifiers and delimited identifiers.
+
+
+Regular Identifiers
+===================
+
+Description
+-----------
+
+A regular identifier is a string of characters that must start with an ASCII letter (lower or upper case). The subsequent characters can be any combination of letters, digits and underscores (``_``). It cannot be a reserved keyword, and whitespace and other special characters are not allowed.
+
+Examples
+--------
+
+Here is an example of using an index name directly without quotes::
+
+    od> source=accounts | fields account_number, firstname, lastname;
+    fetched rows / total rows = 4/4
+    +------------------+-------------+------------+
+    | account_number   | firstname   | lastname   |
+    |------------------+-------------+------------|
+    | 1                | Amber       | Duke       |
+    | 6                | Hattie      | Bond       |
+    | 13               | Nanette     | Bates      |
+    | 18               | Dale        | Adams      |
+    +------------------+-------------+------------+
+
+
+Delimited Identifiers
+=====================
+
+Description
+-----------
+
+A delimited identifier is an identifier enclosed in backticks `````. In this case, the enclosed identifier does not need to be a regular identifier; in other words, it can contain any special character not allowed in a regular identifier. For Elasticsearch, the following identifiers are additionally supported:
+
+1. Identifiers prefixed by a dot ``.``: this denotes a hidden index in Elasticsearch, for example ``.kibana``.
+2. Identifiers prefixed by an at sign ``@``: this is common for meta fields generated by Logstash ingestion.
+3. Identifiers with ``-`` in the middle: this is mostly the case for index names that include date information.
+4. Identifiers with a star ``*`` present: this is mostly an index pattern for wildcard matching.
+
+Use Cases
+---------
+
+Here are typical use cases for delimited identifiers:
+
+1. Identifiers that coincide with reserved keywords.
+2. Identifiers with a dot ``.`` present: similar to ``-`` in index names that include date information, quoting is required so the parser can differentiate the dot from a qualifier separator.
+3. Identifiers with other special characters: Elasticsearch has its own rules that allow more special characters; for example, Unicode characters are supported in index names.
+
+Examples
+--------
+
+Here is an example of quoting an index pattern and a field name with backticks::
+
+    od> source=`acc*` | fields `account_number`;
+    fetched rows / total rows = 4/4
+    +------------------+
+    | account_number   |
+    |------------------|
+    | 1                |
+    | 6                |
+    | 13               |
+    | 18               |
+    +------------------+
+
+
+Case Sensitivity
+================
+
+Description
+-----------
+
+Identifiers are treated in a case-sensitive manner, so they must match exactly what is stored in Elasticsearch.
+
+Examples
+--------
+
+For example, if you run ``source=Accounts``, you will get an index-not-found exception from the plugin because the actual index name is in lower case.
\ No newline at end of file diff --git a/docs/experiment/ppl/index.rst b/docs/experiment/ppl/index.rst index 45eda59a26..fa160352e0 100644 --- a/docs/experiment/ppl/index.rst +++ b/docs/experiment/ppl/index.rst @@ -61,3 +61,9 @@ The query start with search command and then flowing a set of command delimited * **Functions** - `PPL Functions <../../user/dql/functions.rst>`_ + +* **Language Structure** + + - `Identifiers `_ + + - `Data Types `_ diff --git a/integ-test/build.gradle b/integ-test/build.gradle index 2b1d75bd85..685bd0b360 100644 --- a/integ-test/build.gradle +++ b/integ-test/build.gradle @@ -128,9 +128,6 @@ task integTestWithNewEngine(type: RestIntegTestTask) { // Skip old semantic analyzer IT because analyzer in new engine has different behavior exclude 'com/amazon/opendistroforelasticsearch/sql/legacy/QueryAnalysisIT.class' - // Skip this IT to avoid breaking tests due to inconsistency in JDBC schema - exclude 'com/amazon/opendistroforelasticsearch/sql/legacy/AggregationExpressionIT.class' - // Skip this IT because all assertions are against explain output exclude 'com/amazon/opendistroforelasticsearch/sql/legacy/OrderIT.class' } diff --git a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/legacy/QueryIT.java b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/legacy/QueryIT.java index 5cec676835..1e634ff679 100644 --- a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/legacy/QueryIT.java +++ b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/legacy/QueryIT.java @@ -187,7 +187,6 @@ public void selectAllWithFieldAndGroupByReverseOrder() throws IOException { checkSelectAllAndFieldAggregationResponseSize(response, "age"); } - @Ignore("This failed because there is no alias field in schema of new engine default formatter") @Test public void selectFieldWithAliasAndGroupBy() { String response = diff --git a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/DateTimeFunctionIT.java b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/DateTimeFunctionIT.java index 1f69188598..3e603963c6 100644 --- a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/DateTimeFunctionIT.java +++ b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/DateTimeFunctionIT.java @@ -136,11 +136,11 @@ public void testDay() throws IOException { @Test public void testDayName() throws IOException { JSONObject result = executeQuery("select dayname(date('2020-09-16'))"); - verifySchema(result, schema("dayname(date('2020-09-16'))", null, "string")); + verifySchema(result, schema("dayname(date('2020-09-16'))", null, "keyword")); verifyDataRows(result, rows("Wednesday")); result = executeQuery("select dayname('2020-09-16')"); - verifySchema(result, schema("dayname('2020-09-16')", null, "string")); + verifySchema(result, schema("dayname('2020-09-16')", null, "keyword")); verifyDataRows(result, rows("Wednesday")); } @@ -256,11 +256,11 @@ public void testMonth() throws IOException { @Test public void testMonthName() throws IOException { JSONObject result = executeQuery("select monthname(date('2020-09-16'))"); - verifySchema(result, schema("monthname(date('2020-09-16'))", null, "string")); + verifySchema(result, schema("monthname(date('2020-09-16'))", null, "keyword")); verifyDataRows(result, rows("September")); result = executeQuery("select monthname('2020-09-16')"); - verifySchema(result, schema("monthname('2020-09-16')", null, "string")); + verifySchema(result, 
schema("monthname('2020-09-16')", null, "keyword")); verifyDataRows(result, rows("September")); } @@ -378,12 +378,12 @@ public void testWeek() throws IOException { void verifyDateFormat(String date, String type, String format, String formatted) throws IOException { String query = String.format("date_format(%s('%s'), '%s')", type, date, format); JSONObject result = executeQuery("select " + query); - verifySchema(result, schema(query, null, "string")); + verifySchema(result, schema(query, null, "keyword")); verifyDataRows(result, rows(formatted)); query = String.format("date_format('%s', '%s')", date, format); result = executeQuery("select " + query); - verifySchema(result, schema(query, null, "string")); + verifySchema(result, schema(query, null, "keyword")); verifyDataRows(result, rows(formatted)); } diff --git a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/JdbcFormatIT.java b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/JdbcFormatIT.java new file mode 100644 index 0000000000..51cf961ca5 --- /dev/null +++ b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/JdbcFormatIT.java @@ -0,0 +1,58 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ *
+ */
+
+package com.amazon.opendistroforelasticsearch.sql.sql;
+
+import static com.amazon.opendistroforelasticsearch.sql.legacy.TestsConstants.TEST_INDEX_BANK;
+import static com.amazon.opendistroforelasticsearch.sql.util.MatcherUtils.schema;
+import static com.amazon.opendistroforelasticsearch.sql.util.MatcherUtils.verifySchema;
+
+import com.amazon.opendistroforelasticsearch.sql.legacy.SQLIntegTestCase;
+import org.json.JSONObject;
+import org.junit.Test;
+
+public class JdbcFormatIT extends SQLIntegTestCase {
+
+  @Override
+  protected void init() throws Exception {
+    loadIndex(Index.BANK);
+  }
+
+  @Test
+  public void testSimpleDataTypesInSchema() {
+    JSONObject response = new JSONObject(executeQuery(
+        "SELECT account_number, address, age, birthdate, city, male, state "
+            + "FROM " + TEST_INDEX_BANK, "jdbc"));
+
+    verifySchema(response,
+        schema("account_number", "long"),
+        schema("address", "text"),
+        schema("age", "integer"),
+        schema("birthdate", "timestamp"),
+        schema("city", "keyword"),
+        schema("male", "boolean"),
+        schema("state", "text"));
+  }
+
+  @Test
+  public void testAliasInSchema() {
+    JSONObject response = new JSONObject(executeQuery(
+        "SELECT account_number AS acc FROM " + TEST_INDEX_BANK, "jdbc"));
+
+    verifySchema(response, schema("acc", "acc", "long"));
+  }
+
+}
diff --git a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/MathematicalFunctionIT.java b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/MathematicalFunctionIT.java
index f24de89146..7c55cbcc8e 100644
--- a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/MathematicalFunctionIT.java
+++ b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/MathematicalFunctionIT.java
@@ -43,11 +43,11 @@ public void init() throws Exception {
 
   @Test
   public void testConv() throws IOException {
     JSONObject result = executeQuery("select conv(11, 10, 16)");
-    verifySchema(result, schema("conv(11, 10, 16)", null, "string"));
+    verifySchema(result, schema("conv(11, 10, 16)", null, "keyword"));
     verifyDataRows(result, rows("b"));
 
     result = executeQuery("select conv(11, 16, 10)");
-    verifySchema(result, schema("conv(11, 16, 10)", null, "string"));
+    verifySchema(result, schema("conv(11, 16, 10)", null, "keyword"));
     verifyDataRows(result, rows("17"));
   }
 
diff --git a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/TextFunctionIT.java b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/TextFunctionIT.java
index 1cd3285fe8..d972a10c36 100644
--- a/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/TextFunctionIT.java
+++ b/integ-test/src/test/java/com/amazon/opendistroforelasticsearch/sql/sql/TextFunctionIT.java
@@ -60,59 +60,59 @@ public void testRegexp() throws IOException {
 
   @Test
   public void testSubstr() throws IOException {
-    verifyQuery("substr('hello', 2)", "string", "ello");
-    verifyQuery("substr('hello', 2, 2)", "string", "el");
+    verifyQuery("substr('hello', 2)", "keyword", "ello");
+    verifyQuery("substr('hello', 2, 2)", "keyword", "el");
   }
 
   @Test
   public void testSubstring() throws IOException {
-    verifyQuery("substring('hello', 2)", "string", "ello");
-    verifyQuery("substring('hello', 2, 2)", "string", "el");
+    verifyQuery("substring('hello', 2)", "keyword", "ello");
+    verifyQuery("substring('hello', 2, 2)", "keyword", "el");
   }
 
   @Test
   public void testUpper() throws IOException {
-    verifyQuery("upper('hello')", "string", "HELLO");
-    verifyQuery("upper('HELLO')",
"string", "HELLO"); + verifyQuery("upper('hello')", "keyword", "HELLO"); + verifyQuery("upper('HELLO')", "keyword", "HELLO"); } @Test public void testLower() throws IOException { - verifyQuery("lower('hello')", "string", "hello"); - verifyQuery("lower('HELLO')", "string", "hello"); + verifyQuery("lower('hello')", "keyword", "hello"); + verifyQuery("lower('HELLO')", "keyword", "hello"); } @Test public void testTrim() throws IOException { - verifyQuery("trim(' hello')", "string", "hello"); - verifyQuery("trim('hello ')", "string", "hello"); - verifyQuery("trim(' hello ')", "string", "hello"); + verifyQuery("trim(' hello')", "keyword", "hello"); + verifyQuery("trim('hello ')", "keyword", "hello"); + verifyQuery("trim(' hello ')", "keyword", "hello"); } @Test public void testRtrim() throws IOException { - verifyQuery("rtrim(' hello')", "string", " hello"); - verifyQuery("rtrim('hello ')", "string", "hello"); - verifyQuery("rtrim(' hello ')", "string", " hello"); + verifyQuery("rtrim(' hello')", "keyword", " hello"); + verifyQuery("rtrim('hello ')", "keyword", "hello"); + verifyQuery("rtrim(' hello ')", "keyword", " hello"); } @Test public void testLtrim() throws IOException { - verifyQuery("ltrim(' hello')", "string", "hello"); - verifyQuery("ltrim('hello ')", "string", "hello "); - verifyQuery("ltrim(' hello ')", "string", "hello "); + verifyQuery("ltrim(' hello')", "keyword", "hello"); + verifyQuery("ltrim('hello ')", "keyword", "hello "); + verifyQuery("ltrim(' hello ')", "keyword", "hello "); } @Test public void testConcat() throws IOException { - verifyQuery("concat('hello', 'world')", "string", "helloworld"); - verifyQuery("concat('', 'hello')", "string", "hello"); + verifyQuery("concat('hello', 'world')", "keyword", "helloworld"); + verifyQuery("concat('', 'hello')", "keyword", "hello"); } @Test public void testConcat_ws() throws IOException { - verifyQuery("concat_ws(',', 'hello', 'world')", "string", "hello,world"); - verifyQuery("concat_ws(',', '', 'hello')", "string", ",hello"); + verifyQuery("concat_ws(',', 'hello', 'world')", "keyword", "hello,world"); + verifyQuery("concat_ws(',', '', 'hello')", "keyword", ",hello"); } @Test diff --git a/legacy/src/main/java/com/amazon/opendistroforelasticsearch/sql/legacy/plugin/RestSQLQueryAction.java b/legacy/src/main/java/com/amazon/opendistroforelasticsearch/sql/legacy/plugin/RestSQLQueryAction.java index 935c9c849f..565fb69638 100644 --- a/legacy/src/main/java/com/amazon/opendistroforelasticsearch/sql/legacy/plugin/RestSQLQueryAction.java +++ b/legacy/src/main/java/com/amazon/opendistroforelasticsearch/sql/legacy/plugin/RestSQLQueryAction.java @@ -28,8 +28,8 @@ import com.amazon.opendistroforelasticsearch.sql.executor.ExecutionEngine.ExplainResponse; import com.amazon.opendistroforelasticsearch.sql.planner.physical.PhysicalPlan; import com.amazon.opendistroforelasticsearch.sql.protocol.response.QueryResult; +import com.amazon.opendistroforelasticsearch.sql.protocol.response.format.JdbcResponseFormatter; import com.amazon.opendistroforelasticsearch.sql.protocol.response.format.JsonResponseFormatter; -import com.amazon.opendistroforelasticsearch.sql.protocol.response.format.SimpleJsonResponseFormatter; import com.amazon.opendistroforelasticsearch.sql.sql.SQLService; import com.amazon.opendistroforelasticsearch.sql.sql.config.SQLServiceConfig; import com.amazon.opendistroforelasticsearch.sql.sql.domain.SQLQueryRequest; @@ -149,9 +149,8 @@ public void onFailure(Exception e) { }; } - // TODO: duplicate code here as in RestPPLQueryAction private 
ResponseListener createQueryResponseListener(RestChannel channel) { - SimpleJsonResponseFormatter formatter = new SimpleJsonResponseFormatter(PRETTY); + JdbcResponseFormatter formatter = new JdbcResponseFormatter(PRETTY); return new ResponseListener() { @Override public void onResponse(QueryResponse response) { diff --git a/protocol/build.gradle b/protocol/build.gradle index 54e6bb1b89..3bb6b58ff1 100644 --- a/protocol/build.gradle +++ b/protocol/build.gradle @@ -15,6 +15,7 @@ dependencies { compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.10.4' implementation 'com.google.code.gson:gson:2.8.6' compile project(':core') + compile project(':elasticsearch') testImplementation('org.junit.jupiter:junit-jupiter:5.6.2') testCompile group: 'org.hamcrest', name: 'hamcrest-library', version: '2.1' @@ -49,9 +50,13 @@ jacocoTestCoverageVerification { violationRules { rule { limit { + counter = 'LINE' + minimum = 1.0 + } + limit { + counter = 'BRANCH' minimum = 1.0 } - } } afterEvaluate { diff --git a/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/QueryResult.java b/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/QueryResult.java index cc8b4d73bd..83a09366b9 100644 --- a/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/QueryResult.java +++ b/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/QueryResult.java @@ -23,6 +23,7 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; +import lombok.Getter; import lombok.RequiredArgsConstructor; /** @@ -31,6 +32,8 @@ */ @RequiredArgsConstructor public class QueryResult implements Iterable { + + @Getter private final ExecutionEngine.Schema schema; /** diff --git a/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JdbcResponseFormatter.java b/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JdbcResponseFormatter.java new file mode 100644 index 0000000000..a7c798551d --- /dev/null +++ b/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JdbcResponseFormatter.java @@ -0,0 +1,145 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.sql.protocol.response.format; + +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.ARRAY; +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.STRING; +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.STRUCT; +import static com.amazon.opendistroforelasticsearch.sql.elasticsearch.data.type.ElasticsearchDataType.ES_TEXT; +import static com.amazon.opendistroforelasticsearch.sql.elasticsearch.data.type.ElasticsearchDataType.ES_TEXT_KEYWORD; + +import com.amazon.opendistroforelasticsearch.sql.common.antlr.SyntaxCheckException; +import com.amazon.opendistroforelasticsearch.sql.data.type.ExprType; +import com.amazon.opendistroforelasticsearch.sql.exception.QueryEngineException; +import com.amazon.opendistroforelasticsearch.sql.executor.ExecutionEngine.Schema; +import com.amazon.opendistroforelasticsearch.sql.protocol.response.QueryResult; +import java.util.List; +import lombok.Builder; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import lombok.Singular; + +/** + * JDBC formatter that formats both normal or error response exactly same way as legacy code to + * avoid impact on client side. The only difference is a new "version" that indicates the response + * was produced by new query engine. + */ +public class JdbcResponseFormatter extends JsonResponseFormatter { + + public JdbcResponseFormatter(Style style) { + super(style); + } + + @Override + protected Object buildJsonObject(QueryResult response) { + JdbcResponse.JdbcResponseBuilder json = JdbcResponse.builder(); + + // Fetch schema and data rows + response.getSchema().getColumns().forEach(col -> json.column(fetchColumn(col))); + json.datarows(fetchDataRows(response)); + + // Populate other fields + json.total(response.size()) + .size(response.size()) + .status(200); + + return json.build(); + } + + @Override + public String format(Throwable t) { + Error error = new Error( + t.getClass().getSimpleName(), + t.getMessage(), + t.getMessage()); + return jsonify(new JdbcErrorResponse(error, getStatus(t))); + } + + private Column fetchColumn(Schema.Column col) { + return new Column(col.getName(), col.getAlias(), convertToLegacyType(col.getExprType())); + } + + /** + * Convert type that exists in both legacy and new engine but has different name. + * Return old type name to avoid breaking impact on client-side. + */ + private String convertToLegacyType(ExprType type) { + if (type == ES_TEXT || type == ES_TEXT_KEYWORD) { + return "text"; + } else if (type == STRING) { + return "keyword"; + } else if (type == STRUCT) { + return "object"; + } else if (type == ARRAY) { + return "nested"; + } else { + return type.typeName().toLowerCase(); + } + } + + private Object[][] fetchDataRows(QueryResult response) { + Object[][] rows = new Object[response.size()][]; + int i = 0; + for (Object[] values : response) { + rows[i++] = values; + } + return rows; + } + + private int getStatus(Throwable t) { + return (t instanceof SyntaxCheckException + || t instanceof QueryEngineException) ? 
400 : 503; + } + + /** + * org.json requires these inner data classes be public (and static) + */ + @Builder + @Getter + public static class JdbcResponse { + @Singular("column") + private final List schema; + private final Object[][] datarows; + private final long total; + private final long size; + private final int status; + } + + @RequiredArgsConstructor + @Getter + public static class Column { + private final String name; + private final String alias; + private final String type; + } + + @RequiredArgsConstructor + @Getter + public static class JdbcErrorResponse { + private final Error error; + private final int status; + } + + @RequiredArgsConstructor + @Getter + public static class Error { + private final String type; + private final String reason; + private final String details; + } + +} diff --git a/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JsonResponseFormatter.java b/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JsonResponseFormatter.java index 4f3706341b..e901aca811 100644 --- a/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JsonResponseFormatter.java +++ b/protocol/src/main/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JsonResponseFormatter.java @@ -71,7 +71,7 @@ public String format(Throwable t) { */ protected abstract Object buildJsonObject(R response); - private String jsonify(Object jsonObject) { + protected String jsonify(Object jsonObject) { return AccessController.doPrivileged((PrivilegedAction) () -> (style == PRETTY) ? PRETTY_PRINT_GSON.toJson(jsonObject) : GSON.toJson(jsonObject)); } diff --git a/protocol/src/test/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JdbcResponseFormatterTest.java b/protocol/src/test/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JdbcResponseFormatterTest.java new file mode 100644 index 0000000000..2705b31d50 --- /dev/null +++ b/protocol/src/test/java/com/amazon/opendistroforelasticsearch/sql/protocol/response/format/JdbcResponseFormatterTest.java @@ -0,0 +1,163 @@ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + */ + +package com.amazon.opendistroforelasticsearch.sql.protocol.response.format; + +import static com.amazon.opendistroforelasticsearch.sql.data.model.ExprValueUtils.LITERAL_MISSING; +import static com.amazon.opendistroforelasticsearch.sql.data.model.ExprValueUtils.LITERAL_NULL; +import static com.amazon.opendistroforelasticsearch.sql.data.model.ExprValueUtils.stringValue; +import static com.amazon.opendistroforelasticsearch.sql.data.model.ExprValueUtils.tupleValue; +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.ARRAY; +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.INTEGER; +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.STRING; +import static com.amazon.opendistroforelasticsearch.sql.data.type.ExprCoreType.STRUCT; +import static com.amazon.opendistroforelasticsearch.sql.elasticsearch.data.type.ElasticsearchDataType.ES_TEXT; +import static com.amazon.opendistroforelasticsearch.sql.elasticsearch.data.type.ElasticsearchDataType.ES_TEXT_KEYWORD; +import static com.amazon.opendistroforelasticsearch.sql.executor.ExecutionEngine.Schema; +import static com.amazon.opendistroforelasticsearch.sql.executor.ExecutionEngine.Schema.Column; +import static com.amazon.opendistroforelasticsearch.sql.protocol.response.format.JsonResponseFormatter.Style.COMPACT; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.amazon.opendistroforelasticsearch.sql.common.antlr.SyntaxCheckException; +import com.amazon.opendistroforelasticsearch.sql.data.model.ExprTupleValue; +import com.amazon.opendistroforelasticsearch.sql.exception.SemanticCheckException; +import com.amazon.opendistroforelasticsearch.sql.protocol.response.QueryResult; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.gson.JsonParser; +import java.util.Arrays; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; + +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class JdbcResponseFormatterTest { + + private final JdbcResponseFormatter formatter = new JdbcResponseFormatter(COMPACT); + + @Test + void format_response() { + QueryResult response = new QueryResult( + new Schema(ImmutableList.of( + new Column("name", "name", STRING), + new Column("address1", "address1", ES_TEXT), + new Column("address2", "address2", ES_TEXT_KEYWORD), + new Column("location", "location", STRUCT), + new Column("employer", "employer", ARRAY), + new Column("age", "age", INTEGER))), + ImmutableList.of( + tupleValue(ImmutableMap.builder() + .put("name", "John") + .put("address1", "Seattle") + .put("address2", "WA") + .put("location", ImmutableMap.of("x", "1", "y", "2")) + .put("employments", ImmutableList.of( + ImmutableMap.of("name", "Amazon"), + ImmutableMap.of("name", "AWS"))) + .put("age", 20) + .build()))); + + assertJsonEquals( + "{" + + "\"schema\":[" + + "{\"name\":\"name\",\"alias\":\"name\",\"type\":\"keyword\"}," + + "{\"name\":\"address1\",\"alias\":\"address1\",\"type\":\"text\"}," + + "{\"name\":\"address2\",\"alias\":\"address2\",\"type\":\"text\"}," + + "{\"name\":\"location\",\"alias\":\"location\",\"type\":\"object\"}," + + "{\"name\":\"employer\",\"alias\":\"employer\",\"type\":\"nested\"}," + + "{\"name\":\"age\",\"alias\":\"age\",\"type\":\"integer\"}" + + "]," + + "\"datarows\":[" + + "[\"John\",\"Seattle\",\"WA\",{\"x\":\"1\",\"y\":\"2\"}," + + 
"[{\"name\":\"Amazon\"}," + "{\"name\":\"AWS\"}]," + + "20]]," + + "\"total\":1," + + "\"size\":1," + + "\"status\":200}", + formatter.format(response)); + } + + @Test + void format_response_with_missing_and_null_value() { + QueryResult response = + new QueryResult( + new Schema(ImmutableList.of( + new Column("name", null, STRING), + new Column("age", null, INTEGER))), + Arrays.asList( + ExprTupleValue.fromExprValueMap( + ImmutableMap.of("name", stringValue("John"), "age", LITERAL_MISSING)), + ExprTupleValue.fromExprValueMap( + ImmutableMap.of("name", stringValue("Allen"), "age", LITERAL_NULL)), + tupleValue(ImmutableMap.of("name", "Smith", "age", 30)))); + + assertEquals( + "{\"schema\":[{\"name\":\"name\",\"type\":\"keyword\"}," + + "{\"name\":\"age\",\"type\":\"integer\"}]," + + "\"datarows\":[[\"John\",null],[\"Allen\",null]," + + "[\"Smith\",30]],\"total\":3,\"size\":3,\"status\":200}", + formatter.format(response)); + } + + @Test + void format_client_error_response_due_to_syntax_exception() { + assertJsonEquals( + "{\"error\":" + + "{\"" + + "type\":\"SyntaxCheckException\"," + + "\"reason\":\"Invalid query syntax\"," + + "\"details\":\"Invalid query syntax\"" + + "}," + + "\"status\":400}", + formatter.format(new SyntaxCheckException("Invalid query syntax")) + ); + } + + @Test + void format_client_error_response_due_to_semantic_exception() { + assertJsonEquals( + "{\"error\":" + + "{\"" + + "type\":\"SemanticCheckException\"," + + "\"reason\":\"Invalid query semantics\"," + + "\"details\":\"Invalid query semantics\"" + + "}," + + "\"status\":400}", + formatter.format(new SemanticCheckException("Invalid query semantics")) + ); + } + + @Test + void format_server_error_response() { + assertJsonEquals( + "{\"error\":" + + "{\"" + + "type\":\"IllegalStateException\"," + + "\"reason\":\"Execution error\"," + + "\"details\":\"Execution error\"" + + "}," + + "\"status\":503}", + formatter.format(new IllegalStateException("Execution error")) + ); + } + + private static void assertJsonEquals(String expected, String actual) { + assertEquals( + JsonParser.parseString(expected), + JsonParser.parseString(actual)); + } + +} \ No newline at end of file diff --git a/release-notes/opendistro-for-elasticsearch-sql.release-notes-1.12.0.0.md b/release-notes/opendistro-for-elasticsearch-sql.release-notes-1.12.0.0.md new file mode 100644 index 0000000000..d8c9d808f9 --- /dev/null +++ b/release-notes/opendistro-for-elasticsearch-sql.release-notes-1.12.0.0.md @@ -0,0 +1,46 @@ +## 2020-11-20 Version 1.12.0.0 + +### Features +* For ODFE 1.12 change position for sql workbench plugin (remove DEFAULT_APP_CATEGORIES) ([#857](https://github.com/opendistro-for-elasticsearch/sql/pull/857)) +* For ODFE 1.12 change position for sql workbench plugin ([#855](https://github.com/opendistro-for-elasticsearch/sql/pull/855)) +* add support for HH:mm:ss ([#850](https://github.com/opendistro-for-elasticsearch/sql/pull/850)) +* Support NULLS FIRST/LAST in new engine ([#843](https://github.com/opendistro-for-elasticsearch/sql/pull/843)) +* Support subquery in FROM clause in new engine ([#822](https://github.com/opendistro-for-elasticsearch/sql/pull/822)) +* Support CASE clause in new engine ([#818](https://github.com/opendistro-for-elasticsearch/sql/pull/818)) +* Support COUNT star and literal in new engine ([#802](https://github.com/opendistro-for-elasticsearch/sql/pull/802)) +* Adding example of nested() for more complex nested queries ([#801](https://github.com/opendistro-for-elasticsearch/sql/pull/801)) +* Adding example of 
nested() for more complex nested queries ([#799](https://github.com/opendistro-for-elasticsearch/sql/pull/799)) +* Support HAVING in new SQL engine ([#798](https://github.com/opendistro-for-elasticsearch/sql/pull/798)) +* Add ppl request log ([#796](https://github.com/opendistro-for-elasticsearch/sql/pull/796)) + +### Enhancements +* Sort field push down ([#848](https://github.com/opendistro-for-elasticsearch/sql/pull/848)) +* Seperate the logical plan optimization rule from core to storage engine ([#836](https://github.com/opendistro-for-elasticsearch/sql/pull/836)) + +### Bug Fixes +* Fix ExprCollectionValue serialization bug ([#859](https://github.com/opendistro-for-elasticsearch/sql/pull/859)) +* Fix issue: sort order keyword is case sensitive ([#853](https://github.com/opendistro-for-elasticsearch/sql/pull/853)) +* Config the default locale for gradle as en_US ([#847](https://github.com/opendistro-for-elasticsearch/sql/pull/847)) +* Fix bug of nested field format issue in JDBC response ([#846](https://github.com/opendistro-for-elasticsearch/sql/pull/846)) +* Fix symbol error and Fix SSLError when connect es. ([#831](https://github.com/opendistro-for-elasticsearch/sql/pull/831)) +* Bug fix, using Local.Root when format the string in DateTimeFunctionIT ([#794](https://github.com/opendistro-for-elasticsearch/sql/pull/794)) + +### Infrastructure +* add codecov for sql plugin ([#835](https://github.com/opendistro-for-elasticsearch/sql/pull/835)) +* update odbc workflow ([#828](https://github.com/opendistro-for-elasticsearch/sql/pull/828)) +* Updated workbench snapshots to fix broken workflow ([#823](https://github.com/opendistro-for-elasticsearch/sql/pull/823)) +* Updated Mac version for GitHub action build ([#804](https://github.com/opendistro-for-elasticsearch/sql/pull/804)) +* Fix unstable integration tests ([#793](https://github.com/opendistro-for-elasticsearch/sql/pull/793)) +* Update cypress tests and increase delay time ([#792](https://github.com/opendistro-for-elasticsearch/sql/pull/792)) + +### Documentation +* Add doc for ODFE SQL demo ([#826](https://github.com/opendistro-for-elasticsearch/sql/pull/826)) +* Update out-of-date documentation ([#820](https://github.com/opendistro-for-elasticsearch/sql/pull/820)) +* Add release notes for ODFE 1.12 ([#841](https://github.com/opendistro-for-elasticsearch/sql/pull/841)) + +### Maintenance +* SQL release for Elasticsearch 7.10 ([#834](https://github.com/opendistro-for-elasticsearch/sql/pull/834)) +* Migrate Query Workbench to new Platform ([#812](https://github.com/opendistro-for-elasticsearch/sql/pull/812)) +* Migrate Query Workbench to 7.10 ([#840](https://github.com/opendistro-for-elasticsearch/sql/pull/840)) +* Bump version number to 1.12.0.0 for [JDBC, ODBC, SQL-CLI] ([#838](https://github.com/opendistro-for-elasticsearch/sql/pull/838)) +