diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 6afc981bed0..67b2511f107 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -50,7 +50,7 @@ jobs: dead-link: name: Dead links runs-on: ubuntu-latest - timeout-minutes: 120 + timeout-minutes: 150 steps: - uses: actions/checkout@v2 - run: sudo npm install -g markdown-link-check@3.8.7 @@ -125,6 +125,12 @@ jobs: echo "deleted-poms=$true_or_false" >> $GITHUB_OUTPUT echo "deleted-poms_files=$file_list" >> $GITHUB_OUTPUT + doc_files=`python tools/update_modules_check/check_file_updates.py ua $workspace apache/dev origin/$current_branch "docs/**"` + true_or_false=${doc_files%%$'\n'*} + file_list=${doc_files#*$'\n'} + echo "docs=$true_or_false" >> $GITHUB_OUTPUT + echo "docs_files=$file_list" >> $GITHUB_OUTPUT + engine_e2e_files=`python tools/update_modules_check/check_file_updates.py ua $workspace apache/dev origin/$current_branch "seatunnel-e2e/seatunnel-engine-e2e/**"` true_or_false=${engine_e2e_files%%$'\n'*} file_list=${engine_e2e_files#*$'\n'} @@ -268,6 +274,65 @@ jobs: - name: Check Dependencies Licenses run: tools/dependencies/checkLicense.sh + document: + if: needs.changes.outputs.api == 'true' || needs.changes.outputs.docs == 'true' + needs: [ changes, sanity-check ] + name: Build website + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Checkout PR + uses: actions/checkout@v3 + with: + path: seatunnel-pr + - name: Checkout website repo + uses: actions/checkout@v3 + with: + repository: apache/seatunnel-website + path: seatunnel-website + - name: Sync PR changes to website + run: | + bash seatunnel-pr/tools/documents/sync.sh seatunnel-pr seatunnel-website + - uses: actions/setup-node@v2 + with: + node-version: 16.19.0 + - name: Run docusaurus build + run: | + cd seatunnel-website + npm set strict-ssl false + npm install + npm run build + + seatunnel-ui: + if: needs.changes.outputs.api == 'true' + needs: [ changes, sanity-check ] + name: Build SeaTunnel UI + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Checkout PR + uses: actions/checkout@v3 + + - uses: actions/setup-node@v2 + with: + node-version: 20.x + + - name: Install Dependencies and Check Code Style + run: | + cd seatunnel-engine/seatunnel-engine-ui/ + npm install + npm run lint + + - name: Run unit tests + run: | + cd seatunnel-engine/seatunnel-engine-ui/ + npm run test:unit + + - name: Build SeaTunnel UI + run: | + cd seatunnel-engine/seatunnel-engine-ui/ + npm run build + unit-test: needs: [ changes, sanity-check ] if: needs.changes.outputs.api == 'true' || (needs.changes.outputs.api == 'false' && needs.changes.outputs.ut-modules != '') @@ -299,7 +364,7 @@ jobs: matrix: java: [ '8', '11' ] os: [ 'ubuntu-latest' ] - timeout-minutes: 120 + timeout-minutes: 180 steps: - uses: actions/checkout@v2 - name: Set up JDK ${{ matrix.java }} @@ -330,7 +395,7 @@ jobs: matrix: java: [ '8', '11' ] os: [ 'ubuntu-latest' ] - timeout-minutes: 120 + timeout-minutes: 150 steps: - uses: actions/checkout@v2 - name: Set up JDK ${{ matrix.java }} @@ -351,7 +416,7 @@ jobs: echo "sub modules is empty, skipping" fi env: - MAVEN_OPTS: -Xmx2048m + MAVEN_OPTS: -Xmx4096m updated-modules-integration-test-part-3: needs: [ changes, sanity-check ] @@ -392,7 +457,7 @@ jobs: matrix: java: [ '8', '11' ] os: [ 'ubuntu-latest' ] - timeout-minutes: 120 + timeout-minutes: 150 steps: - uses: actions/checkout@v2 - name: Set up JDK ${{ matrix.java }} @@ -413,7 +478,7 @@ jobs: echo "sub modules is empty, skipping" fi env: - 
MAVEN_OPTS: -Xmx2048m + MAVEN_OPTS: -Xmx4096m updated-modules-integration-test-part-5: needs: [ changes, sanity-check ] if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' @@ -683,7 +748,7 @@ jobs: matrix: java: [ '8', '11' ] os: [ 'ubuntu-latest' ] - timeout-minutes: 150 + timeout-minutes: 180 steps: - uses: actions/checkout@v2 - name: Set up JDK ${{ matrix.java }} diff --git a/.github/workflows/documents.yml b/.github/workflows/documents.yml deleted file mode 100644 index 61d064f0109..00000000000 --- a/.github/workflows/documents.yml +++ /dev/null @@ -1,66 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the 'License'); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -name: Documents - -on: - pull_request: - paths: - - 'docs/**' - -jobs: - build: - name: Build website - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - name: Checkout PR - uses: actions/checkout@v3 - with: - path: seatunnel-pr - - - name: Checkout website repo - uses: actions/checkout@v3 - with: - repository: apache/seatunnel-website - path: seatunnel-website - - - name: Sync PR changes to website - run: | - bash seatunnel-pr/tools/documents/sync.sh seatunnel-pr seatunnel-website - - - uses: actions/setup-node@v2 - with: - node-version: 16.19.0 - - - name: Run docusaurus build - run: | - cd seatunnel-website - npm set strict-ssl false - npm install - npm run build - - code-style: - name: Code style - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - - uses: actions/checkout@v3 - with: - submodules: true - - name: Check code style - run: ./mvnw --batch-mode --quiet --no-snapshot-updates clean spotless:check diff --git a/.github/workflows/labeler/label-scope-conf.yml b/.github/workflows/labeler/label-scope-conf.yml index b417d53e72a..b825fb9e58b 100644 --- a/.github/workflows/labeler/label-scope-conf.yml +++ b/.github/workflows/labeler/label-scope-conf.yml @@ -132,6 +132,11 @@ http: - changed-files: - any-glob-to-any-file: seatunnel-connectors-v2/connector-http/** - all-globs-to-all-files: '!seatunnel-connectors-v2/connector-!(http)/**' +prometheus: + - all: + - changed-files: + - any-glob-to-any-file: seatunnel-connectors-v2/connector-prometheus/** + - all-globs-to-all-files: '!seatunnel-connectors-v2/connector-!(prometheus)/**' hudi: - all: - changed-files: diff --git a/.gitignore b/.gitignore index c8732a4acfd..204a966b8a1 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,8 @@ seatunnel-examples /lib/* version.properties +node/ + +dist/ + +seatunnel-engine/seatunnel-engine-server/**/ui/* \ No newline at end of file diff --git a/README.md b/README.md index 2f15fd2209e..1404587b0b0 100644 --- a/README.md +++ b/README.md @@ -125,7 +125,8 @@ Companies and organizations worldwide use SeaTunnel for research, production, an ### 1. How do I install SeaTunnel? 
-Follow the [Installation Guide](https://seatunnel.apache.org/docs/2.3.3/start-v2/locally/deployment/) on our website to get started.
+Follow the [Installation Guide](https://seatunnel.apache.org/docs/start-v2/locally/deployment/) on our website to get
+started.

### 2. How can I contribute to SeaTunnel?

diff --git a/bin/install-plugin.cmd b/bin/install-plugin.cmd
index be82e001bd8..2fe2a340f9a 100644
--- a/bin/install-plugin.cmd
+++ b/bin/install-plugin.cmd
@@ -22,8 +22,8 @@ REM Get seatunnel home
 set "SEATUNNEL_HOME=%~dp0..\"
 echo Set SEATUNNEL_HOME to [%SEATUNNEL_HOME%]
-REM Connector default version is 2.3.8, you can also choose a custom version. eg: 2.3.8: install-plugin.bat 2.3.8
-set "version=2.3.8"
+REM Connector default version is 2.3.9, you can also choose a custom version. eg: 2.3.9: install-plugin.bat 2.3.9
+set "version=2.3.9"
 if not "%~1"=="" set "version=%~1"
 REM Create the lib directory
diff --git a/bin/install-plugin.sh b/bin/install-plugin.sh
index 1938caf30c3..51afda5ad8a 100755
--- a/bin/install-plugin.sh
+++ b/bin/install-plugin.sh
@@ -23,8 +23,8 @@
 # get seatunnel home
 SEATUNNEL_HOME=$(cd $(dirname $0);cd ../;pwd)
-# connector default version is 2.3.8, you can also choose a custom version. eg: 2.3.8: sh install-plugin.sh 2.3.8
-version=2.3.8
+# connector default version is 2.3.9, you can also choose a custom version. eg: 2.3.9: sh install-plugin.sh 2.3.9
+version=2.3.9
 if [ -n "$1" ]; then
     version="$1"
diff --git a/config/plugin_config b/config/plugin_config
index 317b41480e1..317256863f4 100644
--- a/config/plugin_config
+++ b/config/plugin_config
@@ -88,6 +88,7 @@ connector-tdengine
 connector-web3j
 connector-milvus
 connector-activemq
+connector-prometheus
 connector-sls
 connector-qdrant
 connector-typesense
diff --git a/config/seatunnel.yaml b/config/seatunnel.yaml
index c3832394233..79a713a71e0 100644
--- a/config/seatunnel.yaml
+++ b/config/seatunnel.yaml
@@ -17,6 +17,7 @@
 seatunnel:
   engine:
+    classloader-cache-mode: true
    history-job-expire-minutes: 1440
    backup-count: 1
    queue-type: blockingqueue
@@ -37,6 +38,9 @@ seatunnel:
    telemetry:
      metric:
        enabled: false
+    log:
+      scheduled-deletion-enable: true
    http:
      enable-http: true
      port: 8080
+      enable-dynamic-port: false
diff --git a/docs/en/concept/schema-evolution.md b/docs/en/concept/schema-evolution.md
index 067bfc7b1c9..0d699f1e94a 100644
--- a/docs/en/concept/schema-evolution.md
+++ b/docs/en/concept/schema-evolution.md
@@ -1,19 +1,25 @@
 # Schema evolution
 Schema Evolution means that the schema of a data table can be changed and the data synchronization task can automatically adapt to the changes of the new table structure without any other operations.
-Now we only support the operation about `add column`、`drop column`、`rename column` and `modify column` of the table in CDC source. This feature is only support zeta engine at now.
+Now we only support the `add column`, `drop column`, `rename column` and `modify column` operations on tables in the CDC source. At the moment this feature is only supported by the Zeta engine.
+
 ## Supported connectors
 ### Source
 [Mysql-CDC](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/MySQL-CDC.md)
+[Oracle-CDC](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/Oracle-CDC.md)

 ### Sink
 [Jdbc-Mysql](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/sink/Jdbc.md)
+[Jdbc-Oracle](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/sink/Jdbc.md)
+
+Note: Schema evolution does not support transforms at the moment.
+Schema evolution between different types of databases (Oracle-CDC -> Jdbc-Mysql) currently does not support column default values in DDL.
-Note: The schema evolution is not support the transform at now.
+When you use Oracle-CDC, you cannot use the `SYS` or `SYSTEM` user to modify the table schema, otherwise the DDL event will be filtered out, which can lead to schema evolution not working.
+Likewise, tables whose names start with `ORA_TEMP_` have the same problem.

 ## Enable schema evolution
-Schema evolution is disabled by default in CDC source. You need configure `debezium.include.schema.changes = true` which is only supported in MySQL-CDC to enable it.
+Schema evolution is disabled by default in the CDC source. To enable it, you need to configure `debezium.include.schema.changes = true`, which is only supported in CDC sources. When you use Oracle-CDC with schema evolution enabled, you must specify `redo_log_catalog` as the `log.mining.strategy` in the `debezium` attribute.

 ## Examples
@@ -56,3 +62,92 @@ sink {
 }
 }
 ```
+
+### Oracle-CDC -> Jdbc-Oracle
+```
+env {
+  # You can set engine configuration here
+  parallelism = 1
+  job.mode = "STREAMING"
+  checkpoint.interval = 5000
+}
+
+source {
+  # This is an example source plugin **only for test and to demonstrate the feature source plugin**
+  Oracle-CDC {
+    result_table_name = "customers"
+    username = "dbzuser"
+    password = "dbz"
+    database-names = ["ORCLCDB"]
+    schema-names = ["DEBEZIUM"]
+    table-names = ["ORCLCDB.DEBEZIUM.FULL_TYPES"]
+    base-url = "jdbc:oracle:thin:@oracle-host:1521/ORCLCDB"
+    source.reader.close.timeout = 120000
+    connection.pool.size = 1
+    debezium {
+      include.schema.changes = true
+      log.mining.strategy = redo_log_catalog
+    }
+  }
+}
+
+sink {
+  Jdbc {
+    source_table_name = "customers"
+    driver = "oracle.jdbc.driver.OracleDriver"
+    url = "jdbc:oracle:thin:@oracle-host:1521/ORCLCDB"
+    user = "dbzuser"
+    password = "dbz"
+    generate_sink_sql = true
+    database = "ORCLCDB"
+    table = "DEBEZIUM.FULL_TYPES_SINK"
+    batch_size = 1
+    primary_keys = ["ID"]
+    connection.pool.size = 1
+  }
+}
+```
+
+### Oracle-CDC -> Jdbc-Mysql
+```
+env {
+  # You can set engine configuration here
+  parallelism = 1
+  job.mode = "STREAMING"
+  checkpoint.interval = 5000
+}
+
+source {
+  # This is an example source plugin **only for test and to demonstrate the feature source plugin**
+  Oracle-CDC {
+    result_table_name = "customers"
+    username = "dbzuser"
+    password = "dbz"
+    database-names = ["ORCLCDB"]
+    schema-names = ["DEBEZIUM"]
+    table-names = ["ORCLCDB.DEBEZIUM.FULL_TYPES"]
+    base-url = "jdbc:oracle:thin:@oracle-host:1521/ORCLCDB"
+    source.reader.close.timeout = 120000
+    connection.pool.size = 1
+    debezium {
+      include.schema.changes = true
+      log.mining.strategy = redo_log_catalog
+    }
+  }
+}
+
+sink {
+  jdbc {
+    source_table_name = "customers"
+    url = "jdbc:mysql://oracle-host:3306/oracle_sink"
+    driver = "com.mysql.cj.jdbc.Driver"
+    user = "st_user_sink"
+    password = "mysqlpw"
+    generate_sink_sql = true
+    # You need to configure both database and table
+    database = oracle_sink
+    table = oracle_cdc_2_mysql_sink_table
+    primary_keys = ["ID"]
+  }
+}
+```
diff --git a/docs/en/connector-v2/formats/protobuf.md b/docs/en/connector-v2/formats/protobuf.md
new file mode 100644
index 00000000000..8433528978d
--- /dev/null
+++ b/docs/en/connector-v2/formats/protobuf.md
@@ -0,0 +1,163 @@
+# Protobuf Format
+
+Protobuf (Protocol Buffers) is a language-neutral, platform-independent data serialization format developed by Google.
It provides an efficient way to encode structured data and supports multiple programming languages and platforms. + +Currently, Protobuf format can be used with Kafka. + +## Kafka Usage Example + +- Example of simulating a randomly generated data source and writing it to Kafka in Protobuf format + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + parallelism = 1 + result_table_name = "fake" + row.num = 16 + schema = { + fields { + c_int32 = int + c_int64 = long + c_float = float + c_double = double + c_bool = boolean + c_string = string + c_bytes = bytes + + Address { + city = string + state = string + street = string + } + attributes = "map" + phone_numbers = "array" + } + } + } +} + +sink { + kafka { + topic = "test_protobuf_topic_fake_source" + bootstrap.servers = "kafkaCluster:9092" + format = protobuf + kafka.request.timeout.ms = 60000 + kafka.config = { + acks = "all" + request.timeout.ms = 60000 + buffer.memory = 33554432 + } + protobuf_message_name = Person + protobuf_schema = """ + syntax = "proto3"; + + package org.apache.seatunnel.format.protobuf; + + option java_outer_classname = "ProtobufE2E"; + + message Person { + int32 c_int32 = 1; + int64 c_int64 = 2; + float c_float = 3; + double c_double = 4; + bool c_bool = 5; + string c_string = 6; + bytes c_bytes = 7; + + message Address { + string street = 1; + string city = 2; + string state = 3; + string zip = 4; + } + + Address address = 8; + + map attributes = 9; + + repeated string phone_numbers = 10; + } + """ + } +} +``` + +- Example of reading data from Kafka in Protobuf format and printing it to the console + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + Kafka { + topic = "test_protobuf_topic_fake_source" + format = protobuf + protobuf_message_name = Person + protobuf_schema = """ + syntax = "proto3"; + + package org.apache.seatunnel.format.protobuf; + + option java_outer_classname = "ProtobufE2E"; + + message Person { + int32 c_int32 = 1; + int64 c_int64 = 2; + float c_float = 3; + double c_double = 4; + bool c_bool = 5; + string c_string = 6; + bytes c_bytes = 7; + + message Address { + string street = 1; + string city = 2; + string state = 3; + string zip = 4; + } + + Address address = 8; + + map attributes = 9; + + repeated string phone_numbers = 10; + } + """ + schema = { + fields { + c_int32 = int + c_int64 = long + c_float = float + c_double = double + c_bool = boolean + c_string = string + c_bytes = bytes + + Address { + city = string + state = string + street = string + } + attributes = "map" + phone_numbers = "array" + } + } + bootstrap.servers = "kafkaCluster:9092" + start_mode = "earliest" + result_table_name = "kafka_table" + } +} + +sink { + Console { + source_table_name = "kafka_table" + } +} +``` \ No newline at end of file diff --git a/docs/en/connector-v2/sink/Hudi.md b/docs/en/connector-v2/sink/Hudi.md index 6c424fde15e..ea4c066d2f8 100644 --- a/docs/en/connector-v2/sink/Hudi.md +++ b/docs/en/connector-v2/sink/Hudi.md @@ -8,7 +8,7 @@ Used to write data to Hudi. 
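Below is a minimal single-table sink sketch. Per the note under the option tables, a single-table job may flatten the `table_list` items to the outer layer; `database` and `table_name` are assumed table-identity field names here, since the full table-list schema is not shown in this excerpt:

```hocon
# A sketch, not a verified configuration: database/table_name are assumed
# field names; table_dfs_path, schema_save_mode and cdc_enabled appear in
# the option tables below.
sink {
  Hudi {
    table_dfs_path = "hdfs://nameservice1/tmp/hudi"
    schema_save_mode = "CREATE_SCHEMA_WHEN_NOT_EXIST"
    database = "st"
    table_name = "test_table"
    cdc_enabled = true
  }
}
```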
## Key features -- [x] [exactly-once](../../concept/connector-v2-features.md) +- [ ] [exactly-once](../../concept/connector-v2-features.md) - [x] [cdc](../../concept/connector-v2-features.md) - [x] [support multiple table write](../../concept/connector-v2-features.md) @@ -21,7 +21,6 @@ Base configuration: | table_dfs_path | string | yes | - | | conf_files_path | string | no | - | | table_list | Array | no | - | -| auto_commit | boolean | no | true | | schema_save_mode | enum | no | CREATE_SCHEMA_WHEN_NOT_EXIST| | common-options | Config | no | - | @@ -44,6 +43,7 @@ Table list configuration: | index_type | enum | no | BLOOM | | index_class_name | string | no | - | | record_byte_size | Int | no | 1024 | +| cdc_enabled | boolean| no | false | Note: When this configuration corresponds to a single table, you can flatten the configuration items in table_list to the outer layer. @@ -115,9 +115,9 @@ Note: When this configuration corresponds to a single table, you can flatten the `max_commits_to_keep` The max commits to keep of hudi table. -### auto_commit [boolean] +### cdc_enabled [boolean] -`auto_commit` Automatic transaction commit is enabled by default. +`cdc_enabled` Whether to persist the CDC change log. When enable, persist the change data if necessary, and the table can be queried as a CDC query mode. ### schema_save_mode [Enum] diff --git a/docs/en/connector-v2/sink/Jdbc.md b/docs/en/connector-v2/sink/Jdbc.md index 1ddbdd507d9..9b86a27721d 100644 --- a/docs/en/connector-v2/sink/Jdbc.md +++ b/docs/en/connector-v2/sink/Jdbc.md @@ -226,7 +226,7 @@ In the case of is_exactly_once = "true", Xa transactions are used. This requires there are some reference value for params above. -| datasource | driver | url | xa_data_source_class_name | maven | +| datasource | driver | url | xa_data_source_class_name | maven | |-------------------|----------------------------------------------|--------------------------------------------------------------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| | MySQL | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | com.mysql.cj.jdbc.MysqlXADataSource | https://mvnrepository.com/artifact/mysql/mysql-connector-java | | PostgreSQL | org.postgresql.Driver | jdbc:postgresql://localhost:5432/postgres | org.postgresql.xa.PGXADataSource | https://mvnrepository.com/artifact/org.postgresql/postgresql | @@ -235,7 +235,7 @@ there are some reference value for params above. 
| SQL Server | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433 | com.microsoft.sqlserver.jdbc.SQLServerXADataSource | https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc | | Oracle | oracle.jdbc.OracleDriver | jdbc:oracle:thin:@localhost:1521/xepdb1 | oracle.jdbc.xa.OracleXADataSource | https://mvnrepository.com/artifact/com.oracle.database.jdbc/ojdbc8 | | sqlite | org.sqlite.JDBC | jdbc:sqlite:test.db | / | https://mvnrepository.com/artifact/org.xerial/sqlite-jdbc | -| GBase8a | com.gbase.jdbc.Driver | jdbc:gbase://e2e_gbase8aDb:5258/test | / | https://cdn.gbase.cn/products/30/p5CiVwXBKQYIUGN8ecHvk/gbase-connector-java-9.5.0.7-build1-bin.jar | +| GBase8a | com.gbase.jdbc.Driver | jdbc:gbase://e2e_gbase8aDb:5258/test | / | https://cdn.gbase.cn/products/30/p5CiVwXBKQYIUGN8ecHvk/gbase-connector-java-9.5.0.7-build1-bin.jar | | StarRocks | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | / | https://mvnrepository.com/artifact/mysql/mysql-connector-java | | db2 | com.ibm.db2.jcc.DB2Driver | jdbc:db2://localhost:50000/testdb | com.ibm.db2.jcc.DB2XADataSource | https://mvnrepository.com/artifact/com.ibm.db2.jcc/db2jcc/db2jcc4 | | saphana | com.sap.db.jdbc.Driver | jdbc:sap://localhost:39015 | / | https://mvnrepository.com/artifact/com.sap.cloud.db.jdbc/ngdbc | @@ -245,9 +245,10 @@ there are some reference value for params above. | Snowflake | net.snowflake.client.jdbc.SnowflakeDriver | jdbc:snowflake://.snowflakecomputing.com | / | https://mvnrepository.com/artifact/net.snowflake/snowflake-jdbc | | Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | / | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar | | Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | / | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar | -| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | / | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.11/oceanbase-client-2.4.11.jar | +| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | / | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.12/oceanbase-client-2.4.12.jar | | xugu | com.xugu.cloudjdbc.Driver | jdbc:xugu://localhost:5138 | / | https://repo1.maven.org/maven2/com/xugudb/xugu-jdbc/12.2.0/xugu-jdbc-12.2.0.jar | | InterSystems IRIS | com.intersystems.jdbc.IRISDriver | jdbc:IRIS://localhost:1972/%SYS | / | https://raw.githubusercontent.com/intersystems-community/iris-driver-distribution/main/JDBC/JDK18/intersystems-jdbc-3.8.4.jar | +| opengauss | org.opengauss.Driver | jdbc:opengauss://localhost:5432/postgres | / | https://repo1.maven.org/maven2/org/opengauss/opengauss-jdbc/5.1.0-og/opengauss-jdbc-5.1.0-og.jar | ## Example diff --git a/docs/en/connector-v2/sink/Mivlus.md b/docs/en/connector-v2/sink/Milvus.md similarity index 79% rename from docs/en/connector-v2/sink/Mivlus.md rename to docs/en/connector-v2/sink/Milvus.md index 081f427a5df..6b6598fae30 100644 --- a/docs/en/connector-v2/sink/Mivlus.md +++ b/docs/en/connector-v2/sink/Milvus.md @@ -4,8 +4,11 @@ ## Description -Write data to Milvus or Zilliz Cloud - +This Milvus sink connector write data to Milvus or Zilliz Cloud, it has the following features: +- support read and write data by partition +- support write dynamic schema data from Metadata Column +- json data will be converted to json string and sink as json as well +- retry automatically to bypass 
ratelimit and grpc limit ## Key Features - [x] [batch](../../concept/connector-v2-features.md) @@ -34,7 +37,7 @@ Write data to Milvus or Zilliz Cloud ## Sink Options -| Name | Type | Required | Default | Description | +| Name | Type | Required | Default | Description | |----------------------|---------|----------|------------------------------|-----------------------------------------------------------| | url | String | Yes | - | The URL to connect to Milvus or Zilliz Cloud. | | token | String | Yes | - | User:password | @@ -44,6 +47,7 @@ Write data to Milvus or Zilliz Cloud | enable_upsert | boolean | No | false | Upsert data not insert. | | enable_dynamic_field | boolean | No | true | Enable create table with dynamic field. | | batch_size | int | No | 1000 | Write batch size. | +| partition_key | String | No | | Milvus partition key field | ## Task Example diff --git a/docs/en/connector-v2/sink/Paimon.md b/docs/en/connector-v2/sink/Paimon.md index 8133b6e8360..c9e4b3a9b61 100644 --- a/docs/en/connector-v2/sink/Paimon.md +++ b/docs/en/connector-v2/sink/Paimon.md @@ -31,7 +31,7 @@ libfb303-xxx.jar ## Options -| name | type | required | default value | Description | +| name | type | required | default value | Description | |-----------------------------|--------|----------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| | warehouse | String | Yes | - | Paimon warehouse path | | catalog_type | String | No | filesystem | Catalog type of Paimon, support filesystem and hive | @@ -43,7 +43,7 @@ libfb303-xxx.jar | data_save_mode | Enum | No | APPEND_DATA | The data save mode | | paimon.table.primary-keys | String | No | - | Default comma-separated list of columns (primary key) that identify a row in tables.(Notice: The partition field needs to be included in the primary key fields) | | paimon.table.partition-keys | String | No | - | Default comma-separated list of partition fields to use when creating tables. | -| paimon.table.write-props | Map | No | - | Properties passed through to paimon table initialization, [reference](https://paimon.apache.org/docs/master/maintenance/configurations/#coreoptions). | +| paimon.table.write-props | Map | No | - | Properties passed through to paimon table initialization, [reference](https://paimon.apache.org/docs/master/maintenance/configurations/#coreoptions). | | paimon.hadoop.conf | Map | No | - | Properties in hadoop conf | | paimon.hadoop.conf-path | String | No | - | The specified loading path for the 'core-site.xml', 'hdfs-site.xml', 'hive-site.xml' files | @@ -52,9 +52,14 @@ You must configure the `changelog-producer=input` option to enable the changelog The changelog producer mode of the paimon table has [four mode](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/) which is `none`、`input`、`lookup` and `full-compaction`. -Currently, we only support the `none` and `input` mode. The default is `none` which will not output the changelog file. The `input` mode will output the changelog file in paimon table. +All `changelog-producer` modes are currently supported. The default is `none`. -When you use a streaming mode to read paimon table, these two mode will produce [different results](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/Paimon.md#changelog). 
+
+* [`none`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#none)
+* [`input`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#input)
+* [`lookup`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#lookup)
+* [`full-compaction`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#full-compaction)
+> Note:
+> When you use streaming mode to read a Paimon table, different modes will produce [different results](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/Paimon.md#changelog).

 ## Examples
@@ -250,6 +255,38 @@ sink {
 }
 }
 ```
+#### Write with the `changelog-producer` attribute
+
+```hocon
+env {
+  parallelism = 1
+  job.mode = "STREAMING"
+  checkpoint.interval = 5000
+}
+
+source {
+  Mysql-CDC {
+    base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel"
+    username = "root"
+    password = "******"
+    table-names = ["seatunnel.role"]
+  }
+}
+
+sink {
+  Paimon {
+    catalog_name = "seatunnel_test"
+    warehouse = "file:///tmp/seatunnel/paimon/hadoop-sink/"
+    database = "seatunnel"
+    table = "role"
+    paimon.table.write-props = {
+      changelog-producer = full-compaction
+      changelog-tmp-path = /tmp/paimon/changelog
+    }
+  }
+}
+```
+
 ### Write to dynamic bucket table

 A single dynamic bucket table with Paimon write props operates on the primary key table, and the bucket is -1.

diff --git a/docs/en/connector-v2/sink/Prometheus.md b/docs/en/connector-v2/sink/Prometheus.md
new file mode 100644
index 00000000000..8ba31ca2e2d
--- /dev/null
+++ b/docs/en/connector-v2/sink/Prometheus.md
@@ -0,0 +1,103 @@
+# Prometheus
+
+> Prometheus sink connector
+
+## Support Those Engines
+
+> Spark<br/>
+> Flink<br/>
+> SeaTunnel Zeta<br/>
+ +## Key Features + +- [ ] [exactly-once](../../concept/connector-v2-features.md) +- [ ] [cdc](../../concept/connector-v2-features.md) +- [x] [support multiple table write](../../concept/connector-v2-features.md) + +## Description + +Used to launch web hooks using data. + +> For example, if the data from upstream is [`label: {"__name__": "test1"}, value: 1.2.3,time:2024-08-15T17:00:00`], the body content is the following: `{"label":{"__name__": "test1"}, "value":"1.23","time":"2024-08-15T17:00:00"}` + +**Tips: Prometheus sink only support `post json` webhook and the data from source will be treated as body content in web hook.And does not support passing past data** + +## Supported DataSource Info + +In order to use the Http connector, the following dependencies are required. +They can be downloaded via install-plugin.sh or from the Maven central repository. + +| Datasource | Supported Versions | Dependency | +|------------|--------------------|------------------------------------------------------------------------------------------------------------------| +| Http | universal | [Download](https://mvnrepository.com/artifact/org.apache.seatunnel/seatunnel-connectors-v2/connector-prometheus) | + +## Sink Options + +| Name | Type | Required | Default | Description | +|-----------------------------|--------|----------|---------|-------------------------------------------------------------------------------------------------------------| +| url | String | Yes | - | Http request url | +| headers | Map | No | - | Http headers | +| retry | Int | No | - | The max retry times if request http return to `IOException` | +| retry_backoff_multiplier_ms | Int | No | 100 | The retry-backoff times(millis) multiplier if request http failed | +| retry_backoff_max_ms | Int | No | 10000 | The maximum retry-backoff times(millis) if request http failed | +| connect_timeout_ms | Int | No | 12000 | Connection timeout setting, default 12s. | +| socket_timeout_ms | Int | No | 60000 | Socket timeout setting, default 60s. | +| key_timestamp | Int | NO | - | prometheus timestamp key . | +| key_label | String | yes | - | prometheus label key | +| key_value | Double | yes | - | prometheus value | +| batch_size | Int | false | 1024 | prometheus batch size write | +| flush_interval | Long | false | 300000L | prometheus flush commit interval | +| common-options | | No | - | Sink plugin common parameters, please refer to [Sink Common Options](../sink-common-options.md) for details | + +## Example + +simple: + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + fields { + c_map = "map" + c_double = double + c_timestamp = timestamp + } + } + result_table_name = "fake" + rows = [ + { + kind = INSERT + fields = [{"__name__": "test1"}, 1.23, "2024-08-15T17:00:00"] + }, + { + kind = INSERT + fields = [{"__name__": "test2"}, 1.23, "2024-08-15T17:00:00"] + } + ] + } +} + + +sink { + Prometheus { + url = "http://prometheus:9090/api/v1/write" + key_label = "c_map" + key_value = "c_double" + key_timestamp = "c_timestamp" + batch_size = 1 + } +} + +``` + +## Changelog + +### 2.3.8-beta 2024-08-22 + +- Add Http Sink Connector + diff --git a/docs/en/connector-v2/sink/Redis.md b/docs/en/connector-v2/sink/Redis.md index b5f444bb117..5b37720891b 100644 --- a/docs/en/connector-v2/sink/Redis.md +++ b/docs/en/connector-v2/sink/Redis.md @@ -12,21 +12,25 @@ Used to write data to Redis. 
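For cluster deployments, here is a minimal sketch based on the options listed below. The `host:port` string format for the `nodes` entries is an assumption, since the exact element format is not spelled out on this page:

```hocon
# A sketch based on the option table below; the nodes entry format is assumed.
Redis {
  host = "redis-node1"
  port = 6379
  key = "person"
  data_type = key
  mode = "cluster"
  # nodes is required when mode = cluster
  nodes = ["redis-node1:6379", "redis-node2:6379", "redis-node3:6379"]
  # expire keys after one day (seconds); the default -1 never expires
  expire = 86400
}
```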
## Options -| name | type | required | default value | -|----------------|--------|-----------------------|---------------| -| host | string | yes | - | -| port | int | yes | - | -| key | string | yes | - | -| data_type | string | yes | - | -| batch_size | int | no | 10 | -| user | string | no | - | -| auth | string | no | - | -| db_num | int | no | 0 | -| mode | string | no | single | -| nodes | list | yes when mode=cluster | - | -| format | string | no | json | -| expire | long | no | -1 | -| common-options | | no | - | +| name | type | required | default value | +|--------------------|---------|-----------------------|---------------| +| host | string | yes | - | +| port | int | yes | - | +| key | string | yes | - | +| data_type | string | yes | - | +| batch_size | int | no | 10 | +| user | string | no | - | +| auth | string | no | - | +| db_num | int | no | 0 | +| mode | string | no | single | +| nodes | list | yes when mode=cluster | - | +| format | string | no | json | +| expire | long | no | -1 | +| support_custom_key | boolean | no | false | +| value_field | string | no | - | +| hash_key_field | string | no | - | +| hash_value_field | string | no | - | +| common-options | | no | - | ### host [string] @@ -50,12 +54,12 @@ Upstream data is the following: | 500 | internal error | false | If you assign field name to `code` and data_type to `key`, two data will be written to redis: -1. `200 -> {code: 200, message: true, data: get success}` -2. `500 -> {code: 500, message: false, data: internal error}` +1. `200 -> {code: 200, data: get success, success: true}` +2. `500 -> {code: 500, data: internal error, success: false}` If you assign field name to `value` and data_type to `key`, only one data will be written to redis because `value` is not existed in upstream data's fields: -1. `value -> {code: 500, message: false, data: internal error}` +1. `value -> {code: 500, data: internal error, success: false}` Please see the data_type section for specific writing rules. @@ -85,7 +89,7 @@ Redis data types, support `key` `hash` `list` `set` `zset` > Each data from upstream will be added to the configured zset key with a weight of 1. So the order of data in zset is based on the order of data consumption. > - ### batch_size [int] +### batch_size [int] ensure the batch write size in single-machine mode; no guarantees in cluster mode. @@ -135,6 +139,61 @@ Connector will generate data as the following and write it to redis: Set redis expiration time, the unit is second. The default value is -1, keys do not automatically expire by default. +### support_custom_key [boolean] + +if true, the key can be customized by the field value in the upstream data. + +Upstream data is the following: + +| code | data | success | +|------|----------------|---------| +| 200 | get success | true | +| 500 | internal error | false | + +You can customize the Redis key using '{' and '}', and the field name in '{}' will be parsed and replaced by the field value in the upstream data. For example, If you assign field name to `{code}` and data_type to `key`, two data will be written to redis: +1. `200 -> {code: 200, data: get success, success: true}` +2. `500 -> {code: 500, data: internal error, success: false}` + +Redis key can be composed of fixed and variable parts, connected by ':'. For example, If you assign field name to `code:{code}` and data_type to `key`, two data will be written to redis: +1. `code:200 -> {code: 200, data: get success, success: true}` +2. 
`code:500 -> {code: 500, data: internal error, success: false}` + +### value_field [string] + +The field of value you want to write to redis, `data_type` support `key` `list` `set` `zset`. + +When you assign field name to `value` and value_field is `data` and data_type to `key`, for example: + +Upstream data is the following: + +| code | data | success | +|------|-------------|---------| +| 200 | get success | true | + +The following data will be written to redis: +1. `value -> get success` + +### hash_key_field [string] + +The field of hash key you want to write to redis, `data_type` support `hash` + +### hash_value_field [string] + +The field of hash value you want to write to redis, `data_type` support `hash` + +When you assign field name to `value` and hash_key_field is `data` and hash_value_field is `success` and data_type to `hash`, for example: + +Upstream data is the following: + +| code | data | success | +|------|-------------|---------| +| 200 | get success | true | + +Connector will generate data as the following and write it to redis: + +The following data will be written to redis: +1. `value -> get success | true` + ### common options Sink plugin common parameters, please refer to [Sink Common Options](../sink-common-options.md) for details @@ -152,6 +211,43 @@ Redis { } ``` +custom key: + +```hocon +Redis { + host = localhost + port = 6379 + key = "name:{name}" + support_custom_key = true + data_type = key +} +``` + +custom value: + +```hocon +Redis { + host = localhost + port = 6379 + key = person + value_field = "name" + data_type = key +} +``` + +custom HashKey and HashValue: + +```hocon +Redis { + host = localhost + port = 6379 + key = person + hash_key_field = "name" + hash_value_field = "age" + data_type = hash +} +``` + ## Changelog ### 2.2.0-beta 2022-09-26 diff --git a/docs/en/connector-v2/source/CosFile.md b/docs/en/connector-v2/source/CosFile.md index 702439c3062..15b6de0c6f8 100644 --- a/docs/en/connector-v2/source/CosFile.md +++ b/docs/en/connector-v2/source/CosFile.md @@ -45,7 +45,7 @@ To use this connector you need put hadoop-cos-{hadoop.version}-{version}.jar and ## Options -| name | type | required | default value | +| name | type | required | default value | |---------------------------|---------|----------|---------------------| | path | string | yes | - | | file_format_type | string | yes | - | @@ -64,7 +64,7 @@ To use this connector you need put hadoop-cos-{hadoop.version}-{version}.jar and | sheet_name | string | no | - | | xml_row_tag | string | no | - | | xml_use_attr_format | boolean | no | - | -| file_filter_pattern | string | no | - | +| file_filter_pattern | string | no | | | compress_codec | string | no | none | | archive_compress_codec | string | no | none | | encoding | string | no | UTF-8 | @@ -275,6 +275,55 @@ Specifies Whether to process data using the tag attribute format. Filter pattern, which used for filtering files. +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. 
+ +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### compress_codec [string] The compress codec of files and the details that supported as the following shown: @@ -372,6 +421,33 @@ sink { ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + CosFile { + bucket = "cosn://seatunnel-test-1259587829" + secret_id = "xxxxxxxxxxxxxxxxxxx" + secret_key = "xxxxxxxxxxxxxxxxxxx" + region = "ap-chengdu" + path = "/seatunnel/read/binary/" + file_format_type = "binary" + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` + ## Changelog ### next version diff --git a/docs/en/connector-v2/source/Doris.md b/docs/en/connector-v2/source/Doris.md index c67444b58c8..373b84f8fdd 100644 --- a/docs/en/connector-v2/source/Doris.md +++ b/docs/en/connector-v2/source/Doris.md @@ -13,15 +13,14 @@ - [x] [batch](../../concept/connector-v2-features.md) - [ ] [stream](../../concept/connector-v2-features.md) - [ ] [exactly-once](../../concept/connector-v2-features.md) -- [x] [schema projection](../../concept/connector-v2-features.md) +- [x] [column projection](../../concept/connector-v2-features.md) - [x] [parallelism](../../concept/connector-v2-features.md) - [x] [support user-defined split](../../concept/connector-v2-features.md) +- [x] [support multiple table read](../../concept/connector-v2-features.md) ## Description -Used to read data from Doris. -Doris Source will send a SQL to FE, FE will parse it into an execution plan, send it to BE, and BE will -directly return the data +Used to read data from Apache Doris. ## Supported DataSource Info @@ -29,11 +28,6 @@ directly return the data |------------|--------------------------------------|--------|-----|-------| | Doris | Only Doris2.0 or later is supported. | - | - | - | -## Database Dependency - -> Please download the support list corresponding to 'Maven' and copy it to the '$SEATNUNNEL_HOME/plugins/jdbc/lib/' -> working directory
- ## Data Type Mapping | Doris Data type | SeaTunnel Data type | @@ -54,29 +48,40 @@ directly return the data ## Source Options +Base configuration: + | Name | Type | Required | Default | Description | |----------------------------------|--------|----------|------------|-----------------------------------------------------------------------------------------------------| | fenodes | string | yes | - | FE address, the format is `"fe_host:fe_http_port"` | | username | string | yes | - | User username | | password | string | yes | - | User password | +| doris.request.retries | int | no | 3 | Number of retries to send requests to Doris FE. | +| doris.request.read.timeout.ms | int | no | 30000 | | +| doris.request.connect.timeout.ms | int | no | 30000 | | +| query-port | string | no | 9030 | Doris QueryPort | +| doris.request.query.timeout.s | int | no | 3600 | Timeout period of Doris scan data, expressed in seconds. | +| table_list | string | 否 | - | table list | + +Table list configuration: + +| Name | Type | Required | Default | Description | +|----------------------------------|--------|----------|------------|-----------------------------------------------------------------------------------------------------| | database | string | yes | - | The name of Doris database | | table | string | yes | - | The name of Doris table | | doris.read.field | string | no | - | Use the 'doris.read.field' parameter to select the doris table columns to read | -| query-port | string | no | 9030 | Doris QueryPort | | doris.filter.query | string | no | - | Data filtering in doris. the format is "field = value",example : doris.filter.query = "F_ID > 2" | | doris.batch.size | int | no | 1024 | The maximum value that can be obtained by reading Doris BE once. | -| doris.request.query.timeout.s | int | no | 3600 | Timeout period of Doris scan data, expressed in seconds. | | doris.exec.mem.limit | long | no | 2147483648 | Maximum memory that can be used by a single be scan request. The default memory is 2G (2147483648). | -| doris.request.retries | int | no | 3 | Number of retries to send requests to Doris FE. | -| doris.request.read.timeout.ms | int | no | 30000 | | -| doris.request.connect.timeout.ms | int | no | 30000 | | + +Note: When this configuration corresponds to a single table, you can flatten the configuration items in table_list to the outer layer. ### Tips > It is not recommended to modify advanced parameters at will -## Task Example +## Example +### single table > This is an example of reading a Doris table and writing to Console. 
``` @@ -159,4 +164,49 @@ sink { Console {} } ``` +### Multiple table +``` +env{ + parallelism = 1 + job.mode = "BATCH" +} +source{ + Doris { + fenodes = "xxxx:8030" + username = root + password = "" + table_list = [ + { + database = "st_source_0" + table = "doris_table_0" + doris.read.field = "F_ID,F_INT,F_BIGINT,F_TINYINT" + doris.filter.query = "F_ID >= 50" + }, + { + database = "st_source_1" + table = "doris_table_1" + } + ] + } +} + +transform {} + +sink{ + Doris { + fenodes = "xxxx:8030" + schema_save_mode = "RECREATE_SCHEMA" + username = root + password = "" + database = "st_sink" + table = "${table_name}" + sink.enable-2pc = "true" + sink.label-prefix = "test_json" + doris.config = { + format="json" + read_json_by_line="true" + } + } +} +``` diff --git a/docs/en/connector-v2/source/FtpFile.md b/docs/en/connector-v2/source/FtpFile.md index ec02f77f9f7..6d114813769 100644 --- a/docs/en/connector-v2/source/FtpFile.md +++ b/docs/en/connector-v2/source/FtpFile.md @@ -84,6 +84,59 @@ The target ftp password is required The source file path. +### file_filter_pattern [string] + +Filter pattern, which used for filtering files. + +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. + +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### file_format_type [string] File type, supported as the following file types: @@ -400,6 +453,33 @@ sink { ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FtpFile { + host = "192.168.31.48" + port = 21 + user = tyrantlucifer + password = tianchao + path = "/seatunnel/read/binary/" + file_format_type = "binary" + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` + ## Changelog ### 2.2.0-beta 2022-09-26 diff --git a/docs/en/connector-v2/source/HdfsFile.md b/docs/en/connector-v2/source/HdfsFile.md index 7413c0428b8..405dfff820f 100644 --- a/docs/en/connector-v2/source/HdfsFile.md +++ b/docs/en/connector-v2/source/HdfsFile.md @@ -41,7 +41,7 @@ Read data from hdfs file system. 
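A minimal read sketch using only the core options (the same values appear in the Filter File example later on this page; the namenode address and path are placeholders):

```hocon
source {
  HdfsFile {
    path = "/apps/hive/demo/student"
    file_format_type = "json"
    fs.defaultFS = "hdfs://namenode001"
  }
}
```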
## Source Options -| Name | Type | Required | Default | Description | +| Name | Type | Required | Default | Description | |---------------------------|---------|----------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | path | string | yes | - | The source file path. | | file_format_type | string | yes | - | We supported as the following file types:`text` `csv` `parquet` `orc` `json` `excel` `xml` `binary`.Please note that, The final file name will end with the file_format's suffix, the suffix of the text file is `txt`. | @@ -62,6 +62,7 @@ Read data from hdfs file system. | sheet_name | string | no | - | Reader the sheet of the workbook,Only used when file_format is excel. | | xml_row_tag | string | no | - | Specifies the tag name of the data rows within the XML file, only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Specifies whether to process data using the tag attribute format, only used when file_format is xml. | +| file_filter_pattern | string | no | | Filter pattern, which used for filtering files. | | compress_codec | string | no | none | The compress codec of files | | archive_compress_codec | string | no | none | | encoding | string | no | UTF-8 | | @@ -71,6 +72,59 @@ Read data from hdfs file system. **delimiter** parameter will deprecate after version 2.3.5, please use **field_delimiter** instead. +### file_filter_pattern [string] + +Filter pattern, which used for filtering files. + +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. 
+ +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### compress_codec [string] The compress codec of files and the details that supported as the following shown: @@ -146,3 +200,26 @@ sink { } ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + HdfsFile { + path = "/apps/hive/demo/student" + file_format_type = "json" + fs.defaultFS = "hdfs://namenode001" + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` diff --git a/docs/en/connector-v2/source/HiveJdbc.md b/docs/en/connector-v2/source/HiveJdbc.md index 19619d924c1..23227aa306f 100644 --- a/docs/en/connector-v2/source/HiveJdbc.md +++ b/docs/en/connector-v2/source/HiveJdbc.md @@ -72,7 +72,7 @@ Read external data source data through JDBC. | partition_num | Int | No | job parallelism | The number of partition count, only support positive integer. default value is job parallelism | | fetch_size | Int | No | 0 | For queries that return a large number of objects,you can configure
the row fetch size used in the query to improve performance by<br/>
reducing the number of database hits required to satisfy the selection criteria.<br/>
Zero means use jdbc default value. | | common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](../source-common-options.md) for details | -| useKerberos | Boolean | No | no | Whether to enable Kerberos, default is false | +| use_kerberos | Boolean | No | no | Whether to enable Kerberos, default is false | | kerberos_principal | String | No | - | When use kerberos, we should set kerberos principal such as 'test_user@xxx'. | | kerberos_keytab_path | String | No | - | When use kerberos, we should set kerberos principal file path such as '/home/test/test_user.keytab' . | | krb5_path | String | No | /etc/krb5.conf | When use kerberos, we should set krb5 path file path such as '/seatunnel/krb5.conf' or use the default path '/etc/krb5.conf '. | diff --git a/docs/en/connector-v2/source/Jdbc.md b/docs/en/connector-v2/source/Jdbc.md index 27b3d875580..2b5897cbaea 100644 --- a/docs/en/connector-v2/source/Jdbc.md +++ b/docs/en/connector-v2/source/Jdbc.md @@ -113,7 +113,7 @@ The JDBC Source connector supports parallel reading of data from tables. SeaTunn there are some reference value for params above. -| datasource | driver | url | maven | +| datasource | driver | url | maven | |-------------------|-----------------------------------------------------|------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| | mysql | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | https://mvnrepository.com/artifact/mysql/mysql-connector-java | | postgresql | org.postgresql.Driver | jdbc:postgresql://localhost:5432/postgres | https://mvnrepository.com/artifact/org.postgresql/postgresql | @@ -122,7 +122,7 @@ there are some reference value for params above. | sqlserver | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433 | https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc | | oracle | oracle.jdbc.OracleDriver | jdbc:oracle:thin:@localhost:1521/xepdb1 | https://mvnrepository.com/artifact/com.oracle.database.jdbc/ojdbc8 | | sqlite | org.sqlite.JDBC | jdbc:sqlite:test.db | https://mvnrepository.com/artifact/org.xerial/sqlite-jdbc | -| gbase8a | com.gbase.jdbc.Driver | jdbc:gbase://e2e_gbase8aDb:5258/test | https://cdn.gbase.cn/products/30/p5CiVwXBKQYIUGN8ecHvk/gbase-connector-java-9.5.0.7-build1-bin.jar | +| gbase8a | com.gbase.jdbc.Driver | jdbc:gbase://e2e_gbase8aDb:5258/test | https://cdn.gbase.cn/products/30/p5CiVwXBKQYIUGN8ecHvk/gbase-connector-java-9.5.0.7-build1-bin.jar | | starrocks | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | https://mvnrepository.com/artifact/mysql/mysql-connector-java | | db2 | com.ibm.db2.jcc.DB2Driver | jdbc:db2://localhost:50000/testdb | https://mvnrepository.com/artifact/com.ibm.db2.jcc/db2jcc/db2jcc4 | | tablestore | com.alicloud.openservices.tablestore.jdbc.OTSDriver | "jdbc:ots:http s://myinstance.cn-hangzhou.ots.aliyuncs.com/myinstance" | https://mvnrepository.com/artifact/com.aliyun.openservices/tablestore-jdbc | @@ -133,10 +133,11 @@ there are some reference value for params above. 
| Redshift | com.amazon.redshift.jdbc42.Driver | jdbc:redshift://localhost:5439/testdb?defaultRowFetchSize=1000 | https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42 | | Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar | | Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar | -| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.11/oceanbase-client-2.4.11.jar | +| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.12/oceanbase-client-2.4.12.jar | | Hive | org.apache.hive.jdbc.HiveDriver | jdbc:hive2://localhost:10000 | https://repo1.maven.org/maven2/org/apache/hive/hive-jdbc/3.1.3/hive-jdbc-3.1.3-standalone.jar | | xugu | com.xugu.cloudjdbc.Driver | jdbc:xugu://localhost:5138 | https://repo1.maven.org/maven2/com/xugudb/xugu-jdbc/12.2.0/xugu-jdbc-12.2.0.jar | | InterSystems IRIS | com.intersystems.jdbc.IRISDriver | jdbc:IRIS://localhost:1972/%SYS | https://raw.githubusercontent.com/intersystems-community/iris-driver-distribution/main/JDBC/JDK18/intersystems-jdbc-3.8.4.jar | +| opengauss | org.opengauss.Driver | jdbc:opengauss://localhost:5432/postgres | https://repo1.maven.org/maven2/org/opengauss/opengauss-jdbc/5.1.0-og/opengauss-jdbc-5.1.0-og.jar | ## Example diff --git a/docs/en/connector-v2/source/Klaviyo.md b/docs/en/connector-v2/source/Klaviyo.md index 10b4ed42e9e..848fe38ef8f 100644 --- a/docs/en/connector-v2/source/Klaviyo.md +++ b/docs/en/connector-v2/source/Klaviyo.md @@ -45,7 +45,7 @@ http request url API private key for login, you can get more detail at this link: -https://developers.klaviyo.com/en/docs/retrieve_api_credentials +https://developers.klaviyo.com/en/docs/authenticate_#private-key-authentication ### revision [String] diff --git a/docs/en/connector-v2/source/LocalFile.md b/docs/en/connector-v2/source/LocalFile.md index 6d11b992e3a..65f287f057b 100644 --- a/docs/en/connector-v2/source/LocalFile.md +++ b/docs/en/connector-v2/source/LocalFile.md @@ -43,7 +43,7 @@ If you use SeaTunnel Engine, It automatically integrated the hadoop jar when you ## Options -| name | type | required | default value | +| name | type | required | default value | |---------------------------|---------|----------|--------------------------------------| | path | string | yes | - | | file_format_type | string | yes | - | @@ -58,7 +58,7 @@ If you use SeaTunnel Engine, It automatically integrated the hadoop jar when you | sheet_name | string | no | - | | xml_row_tag | string | no | - | | xml_use_attr_format | boolean | no | - | -| file_filter_pattern | string | no | - | +| file_filter_pattern | string | no | | | compress_codec | string | no | none | | archive_compress_codec | string | no | none | | encoding | string | no | UTF-8 | @@ -254,6 +254,55 @@ Specifies Whether to process data using the tag attribute format. Filter pattern, which used for filtering files. +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. 
+ +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### compress_codec [string] The compress codec of files and the details that supported as the following shown: @@ -406,6 +455,30 @@ sink { ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + LocalFile { + path = "/data/seatunnel/" + file_format_type = "csv" + skip_header_row_number = 1 + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` + ## Changelog ### 2.2.0-beta 2022-09-26 diff --git a/docs/en/connector-v2/source/Mivlus.md b/docs/en/connector-v2/source/Milvus.md similarity index 85% rename from docs/en/connector-v2/source/Mivlus.md rename to docs/en/connector-v2/source/Milvus.md index a56df4c5fe7..e9560489762 100644 --- a/docs/en/connector-v2/source/Mivlus.md +++ b/docs/en/connector-v2/source/Milvus.md @@ -4,7 +4,11 @@ ## Description -Read data from Milvus or Zilliz Cloud +This Milvus source connector reads data from Milvus or Zilliz Cloud, it has the following features: +- support read and write data by partition +- support read dynamic schema data into Metadata Column +- json data will be converted to json string and sink as json as well +- retry automatically to bypass ratelimit and grpc limit ## Key Features @@ -53,3 +57,5 @@ source { } ``` +## Changelog + diff --git a/docs/en/connector-v2/source/MongoDB-CDC.md b/docs/en/connector-v2/source/MongoDB-CDC.md index 301d7075738..d7e6c7e440f 100644 --- a/docs/en/connector-v2/source/MongoDB-CDC.md +++ b/docs/en/connector-v2/source/MongoDB-CDC.md @@ -105,13 +105,14 @@ For specific types in MongoDB, we use Extended JSON format to map them to Seatun ## Source Options -| Name | Type | Required | Default | Description | +| Name | Type | Required | Default | Description | |------------------------------------|--------|----------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | hosts | String | Yes | - | The comma-separated list of hostname and port pairs of the MongoDB servers. eg. 
`localhost:27017,localhost:27018` | | username | String | No | - | Name of the database user to be used when connecting to MongoDB. | | password | String | No | - | Password to be used when connecting to MongoDB. | | database | List | Yes | - | Name of the database to watch for changes. If not set then all databases will be captured. The database also supports regular expressions to monitor multiple databases matching the regular expression. eg. `db1,db2`. | | collection | List | Yes | - | Name of the collection in the database to watch for changes. If not set then all collections will be captured. The collection also supports regular expressions to monitor multiple collections matching fully-qualified collection identifiers. eg. `db1.coll1,db2.coll2`. | +| schema | | yes | - | The structure of the data, including field names and field types. | | connection.options | String | No | - | The ampersand-separated connection options of MongoDB. eg. `replicaSet=test&connectTimeoutMS=300000`. | | batch.size | Long | No | 1024 | The cursor batch size. | | poll.max.batch.size | Enum | No | 1024 | Maximum number of change stream documents to include in a single batch when polling for new data. | @@ -185,6 +186,14 @@ source { collection = ["inventory.products"] username = stuser password = stpw + schema = { + fields { + "_id" : string, + "name" : string, + "description" : string, + "weight" : string + } + } } } @@ -204,76 +213,6 @@ sink { } ``` -## Multi-table Synchronization - -The following example demonstrates how to create a data synchronization job that read the cdc data of multiple library tables mongodb and prints it on the local client: - -```hocon -env { - # You can set engine configuration here - parallelism = 1 - job.mode = "STREAMING" - checkpoint.interval = 5000 -} - -source { - MongoDB-CDC { - hosts = "mongo0:27017" - database = ["inventory","crm"] - collection = ["inventory.products","crm.test"] - username = stuser - password = stpw - } -} - -# Console printing of the read Mongodb data -sink { - Console { - parallelism = 1 - } -} -``` - -### Tips: - -> 1.The cdc synchronization of multiple library tables cannot specify the schema, and can only output json data downstream. -> This is because MongoDB does not provide metadata information for querying, so if you want to support multiple tables, all tables can only be read as one structure. - -## Regular Expression Matching for Multiple Tables - -The following example demonstrates how to create a data synchronization job that through regular expression read the data of multiple library tables mongodb and prints it on the local client: - -| Matching example | Expressions | | Describe | -|------------------|-------------|---|----------------------------------------------------------------------------------------| -| Prefix matching | ^(test).* | | Match the database name or table name with the prefix test, such as test1, test2, etc. | -| Suffix matching | .*[p$] | | Match the database name or table name with the suffix p, such as cdcp, edcp, etc. | - -```hocon -env { - # You can set engine configuration here - parallelism = 1 - job.mode = "STREAMING" - checkpoint.interval = 5000 -} - -source { - MongoDB-CDC { - hosts = "mongo0:27017" - # So this example is used (^(test).*|^(tpc).*|txc|.*[p$]|t{2}).(t[5-8]|tt),matching txc.tt、test2.test5. 
- database = ["(^(test).*|^(tpc).*|txc|.*[p$]|t{2})"] - collection = ["(t[5-8]|tt)"] - username = stuser - password = stpw - } -} - -# Console printing of the read Mongodb data -sink { - Console { - parallelism = 1 - } -} -``` ## Format of real-time streaming data @@ -309,4 +248,3 @@ sink { } } ``` - diff --git a/docs/en/connector-v2/source/OssFile.md b/docs/en/connector-v2/source/OssFile.md index d5326cb86a4..36d998f054c 100644 --- a/docs/en/connector-v2/source/OssFile.md +++ b/docs/en/connector-v2/source/OssFile.md @@ -190,7 +190,7 @@ If you assign file type to `parquet` `orc`, schema option not required, connecto ## Options -| name | type | required | default value | Description | +| name | type | required | default value | Description | |---------------------------|---------|----------|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | path | string | yes | - | The Oss path that needs to be read can have sub paths, but the sub paths need to meet certain format requirements. Specific requirements can be referred to "parse_partition_from_path" option | | file_format_type | string | yes | - | File type, supported as the following file types: `text` `csv` `parquet` `orc` `json` `excel` `xml` `binary` | @@ -211,7 +211,7 @@ If you assign file type to `parquet` `orc`, schema option not required, connecto | xml_use_attr_format | boolean | no | - | Specifies whether to process data using the tag attribute format, only used when file_format is xml. | | compress_codec | string | no | none | Which compress codec the files used. | | encoding | string | no | UTF-8 | -| file_filter_pattern | string | no | | `*.txt` means you only need read the files end with `.txt` | +| file_filter_pattern | string | no | | Filter pattern, which used for filtering files. | | common-options | config | no | - | Source plugin common parameters, please refer to [Source Common Options](../source-common-options.md) for details. | ### compress_codec [string] @@ -233,6 +233,55 @@ The encoding of the file to read. This param will be parsed by `Charset.forName( Filter pattern, which used for filtering files. +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. 
+ +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### schema [config] Only need to be configured when the file_format_type are text, json, excel, xml or csv ( Or other format we can't read the schema from metadata). @@ -474,6 +523,33 @@ sink { } ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + OssFile { + path = "/seatunnel/orc" + bucket = "oss://tyrantlucifer-image-bed" + access_key = "xxxxxxxxxxxxxxxxx" + access_secret = "xxxxxxxxxxxxxxxxxxxxxx" + endpoint = "oss-cn-beijing.aliyuncs.com" + file_format_type = "orc" + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` + ## Changelog ### 2.2.0-beta 2022-09-26 diff --git a/docs/en/connector-v2/source/OssJindoFile.md b/docs/en/connector-v2/source/OssJindoFile.md index d5bd6d14fa3..933439edc9f 100644 --- a/docs/en/connector-v2/source/OssJindoFile.md +++ b/docs/en/connector-v2/source/OssJindoFile.md @@ -49,7 +49,7 @@ It only supports hadoop version **2.9.X+**. ## Options -| name | type | required | default value | +| name | type | required | default value | |---------------------------|---------|----------|---------------------| | path | string | yes | - | | file_format_type | string | yes | - | @@ -68,7 +68,7 @@ It only supports hadoop version **2.9.X+**. | sheet_name | string | no | - | | xml_row_tag | string | no | - | | xml_use_attr_format | boolean | no | - | -| file_filter_pattern | string | no | - | +| file_filter_pattern | string | no | | | compress_codec | string | no | none | | archive_compress_codec | string | no | none | | encoding | string | no | UTF-8 | @@ -267,6 +267,55 @@ Reader the sheet of the workbook. Filter pattern, which used for filtering files. +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. 
+ +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### compress_codec [string] The compress codec of files and the details that supported as the following shown: @@ -364,6 +413,33 @@ sink { ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + OssJindoFile { + bucket = "oss://tyrantlucifer-image-bed" + access_key = "xxxxxxxxxxxxxxxxx" + access_secret = "xxxxxxxxxxxxxxxxxxxxxx" + endpoint = "oss-cn-beijing.aliyuncs.com" + path = "/seatunnel/read/binary/" + file_format_type = "binary" + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` + ## Changelog ### next version diff --git a/docs/en/connector-v2/source/Prometheus.md b/docs/en/connector-v2/source/Prometheus.md new file mode 100644 index 00000000000..fc9ecc4988b --- /dev/null +++ b/docs/en/connector-v2/source/Prometheus.md @@ -0,0 +1,152 @@ +# Prometheus + +> Prometheus source connector + +## Description + +Used to read data from Prometheus. + +## Key features + +- [x] [batch](../../concept/connector-v2-features.md) +- [ ] [stream](../../concept/connector-v2-features.md) +- [ ] [parallelism](../../concept/connector-v2-features.md) + +## Options + +| name | type | required | default value | +|-----------------------------|---------|----------|-----------------| +| url | String | Yes | - | +| query | String | Yes | - | +| query_type | String | Yes | Instant | +| content_field | String | Yes | $.data.result.* | +| schema.fields | Config | Yes | - | +| format | String | No | json | +| params | Map | Yes | - | +| poll_interval_millis | int | No | - | +| retry | int | No | - | +| retry_backoff_multiplier_ms | int | No | 100 | +| retry_backoff_max_ms | int | No | 10000 | +| enable_multi_lines | boolean | No | false | +| common-options | config | No | - | + +### url [String] + +http request url + +### query [String] + +Prometheus expression query string + +### query_type [String] + +Instant/Range + +1. Instant : The following endpoint evaluates an instant query at a single point in time +2. 
Range : The following endpoint evaluates an expression query over a range of time + +https://prometheus.io/docs/prometheus/latest/querying/api/ + +### params [Map] + +http request params + +### poll_interval_millis [int] + +request http api interval(millis) in stream mode + +### retry [int] + +The max retry times if request http return to `IOException` + +### retry_backoff_multiplier_ms [int] + +The retry-backoff times(millis) multiplier if request http failed + +### retry_backoff_max_ms [int] + +The maximum retry-backoff times(millis) if request http failed + +### format [String] + +the format of upstream data, default `json`. + +### schema [Config] + +Fill in a fixed value + +```hocon + schema = { + fields { + metric = "map" + value = double + time = long + } + } + +``` + +#### fields [Config] + +the schema fields of upstream data + +### common options + +Source plugin common parameters, please refer to [Source Common Options](../source-common-options.md) for details + +## Example + +### Instant: + +```hocon +source { + Prometheus { + result_table_name = "http" + url = "http://mockserver:1080" + query = "up" + query_type = "Instant" + content_field = "$.data.result.*" + format = "json" + schema = { + fields { + metric = "map" + value = double + time = long + } + } + } +} +``` + +### Range + +```hocon +source { + Prometheus { + result_table_name = "http" + url = "http://mockserver:1080" + query = "up" + query_type = "Range" + content_field = "$.data.result.*" + format = "json" + start = "2024-07-22T20:10:30.781Z" + end = "2024-07-22T20:11:00.781Z" + step = "15s" + schema = { + fields { + metric = "map" + value = double + time = long + } + } + } + } +``` + +## Changelog + +### next version + +- Add Prometheus Source Connector +- Reduce configuration items + diff --git a/docs/en/connector-v2/source/S3File.md b/docs/en/connector-v2/source/S3File.md index d280d6dc7f2..4834b025bc3 100644 --- a/docs/en/connector-v2/source/S3File.md +++ b/docs/en/connector-v2/source/S3File.md @@ -196,7 +196,7 @@ If you assign file type to `parquet` `orc`, schema option not required, connecto ## Options -| name | type | required | default value | Description | +| name | type | required | default value | Description | |---------------------------------|---------|----------|-------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | path | string | yes | - | The s3 path that needs to be read can have sub paths, but the sub paths need to meet certain format requirements. Specific requirements can be referred to "parse_partition_from_path" option | | file_format_type | string | yes | - | File type, supported as the following file types: `text` `csv` `parquet` `orc` `json` `excel` `xml` `binary` | @@ -220,12 +220,66 @@ If you assign file type to `parquet` `orc`, schema option not required, connecto | compress_codec | string | no | none | | | archive_compress_codec | string | no | none | | | encoding | string | no | UTF-8 | | +| file_filter_pattern | string | no | | Filter pattern, which used for filtering files. 
| | common-options | | no | - | Source plugin common parameters, please refer to [Source Common Options](../source-common-options.md) for details. | ### delimiter/field_delimiter [string] **delimiter** parameter will deprecate after version 2.3.5, please use **field_delimiter** instead. +### file_filter_pattern [string] + +Filter pattern, which used for filtering files. + +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. + +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### compress_codec [string] The compress codec of files and the details that supported as the following shown: @@ -349,6 +403,33 @@ sink { } ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + S3File { + path = "/seatunnel/json" + bucket = "s3a://seatunnel-test" + fs.s3a.endpoint="s3.cn-north-1.amazonaws.com.cn" + fs.s3a.aws.credentials.provider="com.amazonaws.auth.InstanceProfileCredentialsProvider" + file_format_type = "json" + read_columns = ["id", "name"] + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` + ## Changelog ### 2.3.0-beta 2022-10-20 diff --git a/docs/en/connector-v2/source/SftpFile.md b/docs/en/connector-v2/source/SftpFile.md index 3eadcd3a69e..95c710110a0 100644 --- a/docs/en/connector-v2/source/SftpFile.md +++ b/docs/en/connector-v2/source/SftpFile.md @@ -71,7 +71,7 @@ The File does not have a specific type list, and we can indicate which SeaTunnel ## Source Options -| Name | Type | Required | default value | Description | +| Name | Type | Required | default value | Description | |---------------------------|---------|----------|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | host | String | Yes | - | The target sftp host is required | | port | Int | Yes | - | The target sftp port is required | @@ -96,6 +96,59 @@ The File does not have a specific 
type list, and we can indicate which SeaTunnel | encoding | string | no | UTF-8 | | common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](../source-common-options.md) for details. | +### file_filter_pattern [string] + +Filter pattern, which used for filtering files. + +The pattern follows standard regular expressions. For details, please refer to https://en.wikipedia.org/wiki/Regular_expression. +There are some examples. + +File Structure Example: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +Matching Rules Example: + +**Example 1**: *Match all .txt files*,Regular Expression: +``` +/data/seatunnel/20241001/.*\.txt +``` +The result of this example matching is: +``` +/data/seatunnel/20241001/report.txt +``` +**Example 2**: *Match all file starting with abc*,Regular Expression: +``` +/data/seatunnel/20241002/abc.* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**Example 3**: *Match all file starting with abc,And the fourth character is either h or g*, the Regular Expression: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**Example 4**: *Match third level folders starting with 202410 and files ending with .csv*, the Regular Expression: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +The result of this example matching is: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### file_format_type [string] File type, supported as the following file types: @@ -305,3 +358,30 @@ SftpFile { ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + SftpFile { + host = "sftp" + port = 22 + user = seatunnel + password = pass + path = "tmp/seatunnel/read/json" + file_format_type = "json" + result_table_name = "sftp" + // file example abcD2024.csv + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` \ No newline at end of file diff --git a/docs/en/contribution/setup.md b/docs/en/contribution/setup.md index b2579e1ee1e..8fd632a24b0 100644 --- a/docs/en/contribution/setup.md +++ b/docs/en/contribution/setup.md @@ -80,7 +80,7 @@ After all the above things are done, you just finish the environment setup and c of box. All examples are in module `seatunnel-examples`, you could pick one you are interested in, [Running Or Debugging It In IDEA](https://www.jetbrains.com/help/idea/run-debug-configuration.html) as you wish. -Here we use `seatunnel-examples/seatunnel-engine-examples/src/main/java/org/apache/seatunnel/example/engine/SeaTunnelEngineExample.java` +Here we use `seatunnel-examples/seatunnel-engine-examples/src/main/java/org/apache/seatunnel/example/engine/SeaTunnelEngineLocalExample.java` as an example, when you run it successfully you can see the output as below: ```log diff --git a/docs/en/seatunnel-engine/checkpoint-storage.md b/docs/en/seatunnel-engine/checkpoint-storage.md index 7027f8067fb..19c617e0154 100644 --- a/docs/en/seatunnel-engine/checkpoint-storage.md +++ b/docs/en/seatunnel-engine/checkpoint-storage.md @@ -14,7 +14,7 @@ Checkpoint Storage is a storage mechanism for storing checkpoint data. 
SeaTunnel Engine supports the following checkpoint storage types:
-- HDFS (OSS,S3,HDFS,LocalFile)
+- HDFS (OSS,COS,S3,HDFS,LocalFile)
- LocalFile (native), (it's deprecated: use Hdfs(LocalFile) instead.
We use the microkernel design pattern to separate the checkpoint storage module from the engine. This allows users to implement their own checkpoint storage modules.
@@ -73,6 +73,42 @@ For additional reading on the Hadoop Credential Provider API, you can see: [Credential Provider API](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
For Aliyun OSS Credential Provider implements, you can see: [Auth Credential Providers](https://github.com/aliyun/aliyun-oss-java-sdk/tree/master/src/main/java/com/aliyun/oss/common/auth)
+#### COS
+
+For Tencent COS based hdfs-file, you can refer to [Hadoop COS Docs](https://hadoop.apache.org/docs/stable/hadoop-cos/cloud-storage/) to configure COS.
+
+When interacting with COS buckets, the COS client needs credentials.
+The client supports multiple authentication mechanisms, and it can be configured as to which mechanisms to use and their order of use. Custom implementations of com.qcloud.cos.auth.COSCredentialsProvider may also be used.
+If you use SimpleCredentialsProvider, the credentials consist of an access key and a secret key, which can be obtained from the Tencent Cloud API Key Management console.
+You can configure it like this:
+
+```yaml
+seatunnel:
+  engine:
+    checkpoint:
+      interval: 6000
+      timeout: 7000
+      storage:
+        type: hdfs
+        max-retained: 3
+        plugin-config:
+          storage.type: cos
+          cos.bucket: cosn://your-bucket
+          fs.cosn.credentials.provider: org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider
+          fs.cosn.userinfo.secretId: your-secretId
+          fs.cosn.userinfo.secretKey: your-secretKey
+          fs.cosn.bucket.region: your-region
+```
+
+For additional reading on the Hadoop Credential Provider API, you can see: [Credential Provider API](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
+
+For additional COS configuration, you can see: [Tencent Hadoop-COS Docs](https://doc.fincloud.tencent.cn/tcloud/Storage/COS/846365/hadoop)
+
+Please add the following jars to the lib directory:
+- [hadoop-cos-3.4.1.jar](https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-cos/3.4.1)
+- [cos_api-bundle-5.6.69.jar](https://mvnrepository.com/artifact/com.qcloud/cos_api-bundle/5.6.69)
+- [hadoop-shaded-guava-1.1.1.jar](https://mvnrepository.com/artifact/org.apache.hadoop.thirdparty/hadoop-shaded-guava/1.1.1)
+
#### S3
S3 based hdfs-file you can refer [hadoop s3 docs](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html) to config s3.
diff --git a/docs/en/seatunnel-engine/download-seatunnel.md b/docs/en/seatunnel-engine/download-seatunnel.md
index 48b5ed63a54..12b169e482c 100644
--- a/docs/en/seatunnel-engine/download-seatunnel.md
+++ b/docs/en/seatunnel-engine/download-seatunnel.md
@@ -20,7 +20,7 @@ Go to the [Seatunnel Download Page](https://seatunnel.apache.org/download) to do
Or you can also download it through the terminal.
```shell
-export version="2.3.8"
+export version="2.3.9"
wget "https://archive.apache.org/dist/seatunnel/${version}/apache-seatunnel-${version}-bin.tar.gz"
tar -xzvf "apache-seatunnel-${version}-bin.tar.gz"
```
@@ -33,10 +33,10 @@ Starting from the 2.2.0-beta version, the binary package no longer provides the
sh bin/install-plugin.sh
```
-If you need a specific connector version, taking 2.3.8 as an example, you need to execute the following command.
+If you need a specific connector version, taking 2.3.9 as an example, you need to execute the following command. ```bash -sh bin/install-plugin.sh 2.3.8 +sh bin/install-plugin.sh 2.3.9 ``` Usually you don't need all the connector plugins, so you can specify the plugins you need through configuring `config/plugin_config`, for example, if you only need the `connector-console` plugin, then you can modify the plugin.properties configuration file as follows. diff --git a/docs/en/seatunnel-engine/hybrid-cluster-deployment.md b/docs/en/seatunnel-engine/hybrid-cluster-deployment.md index 88b7b6a44a2..0f4ecf86b7c 100644 --- a/docs/en/seatunnel-engine/hybrid-cluster-deployment.md +++ b/docs/en/seatunnel-engine/hybrid-cluster-deployment.md @@ -127,7 +127,7 @@ seatunnel: This configuration primarily addresses the issue of resource leakage caused by constantly creating and attempting to destroy the class loader. If you encounter exceptions related to metaspace overflow, you can try enabling this configuration. To reduce the frequency of class loader creation, after enabling this configuration, SeaTunnel will not attempt to release the corresponding class loader when a job is completed, allowing it to be used by subsequent jobs. This is more effective when the number of Source/Sink connectors used in the running job is not excessive. -The default value is false. +The default value is true. Example ```yaml @@ -136,6 +136,24 @@ seatunnel: classloader-cache-mode: true ``` +### 4.6 Job Scheduling Strategy + +When resources are insufficient, the job scheduling strategy can be configured in the following two modes: + +1. `WAIT`: Wait for resources to be available. + +2. `REJECT`: Reject the job, default value. + +Example + +```yaml +seatunnel: + engine: + job-schedule-strategy: WAIT +``` + +When `dynamic-slot: true` is used, the `job-schedule-strategy: WAIT` configuration will become invalid and will be forcibly changed to `job-schedule-strategy: REJECT`, because this parameter is meaningless in dynamic slots. + ## 5. Configure The SeaTunnel Engine Network Service All SeaTunnel Engine network-related configurations are in the `hazelcast.yaml` file. @@ -319,4 +337,4 @@ Now that the cluster is deployed, you can complete the submission and management ### 8.2 Submit Jobs With The REST API -The SeaTunnel Engine provides a REST API for submitting and managing jobs. For more information, please refer to [REST API V2](rest-api-v2.md) \ No newline at end of file +The SeaTunnel Engine provides a REST API for submitting and managing jobs. For more information, please refer to [REST API V2](rest-api-v2.md) diff --git a/docs/en/seatunnel-engine/logging.md b/docs/en/seatunnel-engine/logging.md index 7c827887b82..be0bc12f0a2 100644 --- a/docs/en/seatunnel-engine/logging.md +++ b/docs/en/seatunnel-engine/logging.md @@ -30,7 +30,7 @@ The MDC is propagated by slf4j to the logging backend which usually adds it to t Log4j 2 is controlled using property files. 
-The SeaTunnel Engine distribution ships with the following log4j properties files in the `confing` directory, which are used automatically if Log4j 2 is enabled: +The SeaTunnel Engine distribution ships with the following log4j properties files in the `config` directory, which are used automatically if Log4j 2 is enabled: - `log4j2_client.properties`: used by the command line client (e.g., `seatunnel.sh`) - `log4j2.properties`: used for SeaTunnel Engine server processes (e.g., `seatunnel-cluster.sh`) @@ -80,6 +80,36 @@ appender.file.layout.pattern = [%X{ST-JID}] %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%- SeaTunnel Engine automatically integrates Log framework bridge, allowing existing applications that work against Log4j1/Logback classes to continue working. +### Query Logs via REST API + +SeaTunnel provides an API for querying logs. + +**Usage examples:** +- Retrieve logs for all nodes with `jobId` of `733584788375666689`: `http://localhost:8080/logs/733584788375666689` +- Retrieve the log list for all nodes: `http://localhost:8080/logs` +- Retrieve the log list for all nodes in JSON format: `http://localhost:8080/logs?format=json` +- Retrieve log file content: `http://localhost:8080/logs/job-898380162133917698.log` + +For more details, please refer to the [REST-API](rest-api-v2.md). + +## SeaTunnel Log Configuration + +### Scheduled deletion of old logs + +SeaTunnel supports scheduled deletion of old log files to prevent disk space exhaustion. You can add the following configuration in the `seatunnel.yml` file: + +```yaml +seatunnel: + engine: + history-job-expire-minutes: 1440 + telemetry: + logs: + scheduled-deletion-enable: true +``` + +- `history-job-expire-minutes`: Sets the retention time for historical job data and logs (in minutes). The system will automatically clear expired job information and log files after the specified period. +- `scheduled-deletion-enable`: Enable scheduled cleanup, with default value of `true`. The system will automatically delete relevant log files when job expiration time, as defined by `history-job-expire-minutes`, is reached. If this feature is disabled, logs will remain permanently on disk, requiring manual management, which may affect disk space usage. It is recommended to configure this setting based on specific needs. + ## Best practices for developers You can create an SLF4J logger by calling `org.slf4j.LoggerFactory#LoggerFactory.getLogger` with the Class of your class as an argument. 
diff --git a/docs/en/seatunnel-engine/rest-api-v1.md b/docs/en/seatunnel-engine/rest-api-v1.md index ec9d8f13b9b..f9d4fbc3e6e 100644 --- a/docs/en/seatunnel-engine/rest-api-v1.md +++ b/docs/en/seatunnel-engine/rest-api-v1.md @@ -121,10 +121,19 @@ network: }, "createTime": "", "jobDag": { - "vertices": [ + "jobId": "", + "envOptions": [], + "vertexInfoMap": [ + { + "vertexId": 1, + "type": "", + "vertexName": "", + "tablePaths": [ + "" + ] + } ], - "edges": [ - ] + "pipelineEdges": {} }, "pluginJarsUrls": [ ], @@ -162,6 +171,7 @@ network: "createTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -227,6 +237,7 @@ This API has been deprecated, please use /hazelcast/rest/maps/job-info/:jobId in "createTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -307,6 +318,7 @@ When we can't get the job info, the response will be: "finishTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -764,3 +776,70 @@ If the parameter is an empty `Map` object, it means that the tags of the current ``` +------------------------------------------------------------------------------------------ + +### Get All Node Log Content + +
+ GET /hazelcast/rest/maps/logs/:jobId (Returns a list of logs.)
+
+#### Request Parameters (add them in the `params` field of the request body)
+
+> | Parameter Name | Required | Type   | Description |
+> |----------------|----------|--------|-------------|
+> | jobId          | optional | string | job id      |
+
+When `jobId` is empty, it returns log information for all nodes; otherwise, it returns the log list of the specified `jobId` across all nodes.
+
+#### Response
+
+Returns a list of logs, and their content, from the requested nodes.
+
+#### Get the List of All Log Files
+
+If you'd like to view the log list first, you can retrieve it with a `GET` request:
+`http://localhost:5801/hazelcast/rest/maps/logs?format=json`
+
+```json
+[
+  {
+    "node": "localhost:5801",
+    "logLink": "http://localhost:5801/hazelcast/rest/maps/logs/job-899485770241277953.log",
+    "logName": "job-899485770241277953.log"
+  },
+  {
+    "node": "localhost:5801",
+    "logLink": "http://localhost:5801/hazelcast/rest/maps/logs/job-899470314109468673.log",
+    "logName": "job-899470314109468673.log"
+  }
+]
+```
+
+The supported formats are `json` and `html`, with `html` as the default.
+
+#### Examples
+
+- Retrieve logs for all nodes with the `jobId` of `733584788375666689`: `http://localhost:5801/hazelcast/rest/maps/logs/733584788375666689`
+- Retrieve the log list for all nodes: `http://localhost:5801/hazelcast/rest/maps/logs`
+- Retrieve the log list for all nodes in JSON format: `http://localhost:5801/hazelcast/rest/maps/logs?format=json`
+- Retrieve log file content: `http://localhost:5801/hazelcast/rest/maps/logs/job-898380162133917698.log`
+
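+As a quick way to exercise these endpoints from a shell (the host, port, and job id below are illustrative), you can use `curl`:
+
+```bash
+# List every log file known to the cluster, as JSON
+curl -s "http://localhost:5801/hazelcast/rest/maps/logs?format=json"
+
+# Fetch the logs of one job across all nodes
+curl -s "http://localhost:5801/hazelcast/rest/maps/logs/733584788375666689"
+```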
+ +### Get Log Content from a Single Node + +
+ GET /hazelcast/rest/maps/log (Returns a list of logs.) + +#### Response + +Returns a list of logs from the requested node. + +#### Examples + +To get a list of logs from the current node: `http://localhost:5801/hazelcast/rest/maps/log` +To get the content of a log file: `http://localhost:5801/hazelcast/rest/maps/log/job-898380162133917698.log` + +
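+For example, a minimal sketch of saving one log file to disk (the file name is illustrative):
+
+```bash
+# Download a single log file from the current node into the working directory
+curl -s -o job-898380162133917698.log \
+  "http://localhost:5801/hazelcast/rest/maps/log/job-898380162133917698.log"
+```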
\ No newline at end of file
diff --git a/docs/en/seatunnel-engine/rest-api-v2.md b/docs/en/seatunnel-engine/rest-api-v2.md
index e5b9d5d718d..2c642dd8fbe 100644
--- a/docs/en/seatunnel-engine/rest-api-v2.md
+++ b/docs/en/seatunnel-engine/rest-api-v2.md
@@ -10,15 +10,19 @@ completed jobs. The monitoring API is a RESTful API that accepts HTTP requests a
## Overview
The v2 version of the api uses jetty support. It is the same as the interface specification of v1 version
-, you can specify the port and context-path by modifying the configuration items in `seatunnel.yaml`
-
+, you can specify the port and context-path by modifying the configuration items in `seatunnel.yaml`.
+You can configure `enable-dynamic-port` to enable dynamic ports (the port is allocated incrementally starting from `port`); it is disabled by default.
+If `enable-dynamic-port` is true, an unused port in the range from `port` to `port` + `port-range` will be used; the default range is 100.
```yaml
seatunnel:
  engine:
-    enable-http: true
-    port: 8080
+    http:
+      enable-http: true
+      port: 8080
+      enable-dynamic-port: false
+      port-range: 100
```
Context-path can also be configured as follows:
```yaml
seatunnel:
  engine:
-    enable-http: true
-    port: 8080
-    context-path: /seatunnel
+    http:
+      enable-http: true
+      port: 8080
+      context-path: /seatunnel
```
## API reference
@@ -88,10 +93,19 @@ seatunnel:
        },
        "createTime": "",
        "jobDag": {
-          "vertices": [
+          "jobId": "",
+          "envOptions": [],
+          "vertexInfoMap": [
+            {
+              "vertexId": 1,
+              "type": "",
+              "vertexName": "",
+              "tablePaths": [
+                ""
+              ]
+            }
          ],
-          "edges": [
-          ]
+          "pipelineEdges": {}
        },
        "pluginJarsUrls": [
        ],
@@ -129,6 +143,7 @@ seatunnel:
  "createTime": "",
  "jobDag": {
    "jobId": "",
+    "envOptions": [],
    "vertexInfoMap": [
      {
        "vertexId": 1,
@@ -194,6 +209,7 @@ This API has been deprecated, please use /job-info/:jobId instead
  "createTime": "",
  "jobDag": {
    "jobId": "",
+    "envOptions": [],
    "vertexInfoMap": [
      {
        "vertexId": 1,
@@ -274,6 +290,7 @@ When we can't get the job info, the response will be:
  "finishTime": "",
  "jobDag": {
    "jobId": "",
+    "envOptions": [],
    "vertexInfoMap": [
      {
        "vertexId": 1,
@@ -728,3 +745,69 @@ If the parameter is an empty `Map` object, it means that the tags of the current
```
+------------------------------------------------------------------------------------------
+
+### Get Logs from All Nodes
+
+ GET /logs/:jobId (Returns a list of logs.)
+
+#### Request Parameters (to be added in the `params` field of the request body)
+
+> | Parameter Name | Required | Type   | Description |
+> |----------------|----------|--------|-------------|
+> | jobId          | optional | string | job id      |
+
+If `jobId` is empty, the request will return logs from all nodes. Otherwise, it will return the list of logs for the specified `jobId` from all nodes.
+
+#### Response
+
+Returns a list of logs from the requested nodes along with their content.
+
+#### Return the List of All Log Files
+
+If you want to view the log list first, you can retrieve it via a `GET` request: `http://localhost:8080/logs?format=json`
+
+```json
+[
+  {
+    "node": "localhost:8080",
+    "logLink": "http://localhost:8080/logs/job-899485770241277953.log",
+    "logName": "job-899485770241277953.log"
+  },
+  {
+    "node": "localhost:8080",
+    "logLink": "http://localhost:8080/logs/job-899470314109468673.log",
+    "logName": "job-899470314109468673.log"
+  }
+]
+```
+
+Supported formats are `json` and `html`, with `html` as the default.
+
+#### Examples
+
+- Retrieve logs for `jobId` `733584788375666689` across all nodes: `http://localhost:8080/logs/733584788375666689`
+- Retrieve the list of logs from all nodes: `http://localhost:8080/logs`
+- Retrieve the list of logs in JSON format: `http://localhost:8080/logs?format=json`
+- Retrieve the content of a specific log file: `http://localhost:8080/logs/job-898380162133917698.log`
+
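+As a small shell sketch of the same flow (this assumes `jq` is installed; the job id is illustrative):
+
+```bash
+# List the available log files as JSON and print only their names
+curl -s "http://localhost:8080/logs?format=json" | jq -r '.[].logName'
+
+# Fetch the logs of one job across all nodes
+curl -s "http://localhost:8080/logs/733584788375666689"
+```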
+ +### Get Log Content from a Single Node + +
+ GET /log (Returns a list of logs.)
+
+#### Response
+
+Returns a list of logs from the requested node.
+
+#### Examples
+
+To get a list of logs from the current node: `http://localhost:8080/log`
+To get the content of a log file: `http://localhost:8080/log/job-898380162133917698.log`
+
\ No newline at end of file diff --git a/docs/en/seatunnel-engine/separated-cluster-deployment.md b/docs/en/seatunnel-engine/separated-cluster-deployment.md index fd379d8dbce..6b745974970 100644 --- a/docs/en/seatunnel-engine/separated-cluster-deployment.md +++ b/docs/en/seatunnel-engine/separated-cluster-deployment.md @@ -173,7 +173,7 @@ seatunnel: This configuration mainly solves the problem of resource leakage caused by continuously creating and attempting to destroy class loaders. If you encounter an exception related to metaspace space overflow, you can try to enable this configuration. In order to reduce the frequency of creating class loaders, after enabling this configuration, SeaTunnel will not try to release the corresponding class loader when the job is completed, so that it can be used by subsequent jobs, that is to say, when not too many types of Source/Sink connector are used in the running job, it is more effective. -The default value is false. +The default value is true. Example ```yaml @@ -280,6 +280,23 @@ netty-common-4.1.89.Final.jar seatunnel-hadoop3-3.1.4-uber.jar ``` +### 4.7 Job Scheduling Strategy + +When resources are insufficient, the job scheduling strategy can be configured in the following two modes: + +1. `WAIT`: Wait for resources to be available. + +2. `REJECT`: Reject the job, default value. + +Example + +```yaml +seatunnel: + engine: + job-schedule-strategy: WAIT +``` +When `dynamic-slot: true` is used, the `job-schedule-strategy: WAIT` configuration will become invalid and will be forcibly changed to `job-schedule-strategy: REJECT`, because this parameter is meaningless in dynamic slots. + ## 5. Configuring SeaTunnel Engine Network Services All network-related configurations of the SeaTunnel Engine are in the `hazelcast-master.yaml` and `hazelcast-worker.yaml` files. @@ -431,4 +448,4 @@ Now that the cluster has been deployed, you can complete the job submission and ### 8.2 Submit Jobs With The REST API -The SeaTunnel Engine provides a REST API for submitting and managing jobs. For more information, please refer to [REST API V2](rest-api-v2.md) \ No newline at end of file +The SeaTunnel Engine provides a REST API for submitting and managing jobs. For more information, please refer to [REST API V2](rest-api-v2.md) diff --git a/docs/en/seatunnel-engine/web-ui.md b/docs/en/seatunnel-engine/web-ui.md new file mode 100644 index 00000000000..934e6abe7cb --- /dev/null +++ b/docs/en/seatunnel-engine/web-ui.md @@ -0,0 +1,48 @@ +# Apache SeaTunnel Web UI Documentation + +## Access + +Before accessing the web ui we need to enable the http rest api. first you need to configure it in the `seatunnel.yaml` configuration file + +``` +seatunnel: + engine: + http: + enable-http: true + port: 8080 + +``` + +Then visit `http://ip:8080/#/overview` + +## Overview + +The Web UI of Apache SeaTunnel offers a user-friendly interface for monitoring and managing SeaTunnel jobs. Through the Web UI, users can view real-time information on currently running jobs, finished jobs, and the status of worker and master nodes within the cluster. The main functional modules include Jobs, Workers, and Master, each providing detailed status information and operational options to help users efficiently manage and optimize their data processing workflows. +![overview.png](../../images/ui/overview.png) + +## Jobs + +### Running Jobs + +The "Running Jobs" section lists all SeaTunnel jobs that are currently in execution. 
Users can view basic information for each job, including Job ID, submission time, status, execution time, and more. By clicking on a specific job, users can access detailed information such as task distribution, resource utilization, and log outputs, allowing for real-time monitoring of job progress and timely handling of potential issues. +![running.png](../../images/ui/running.png) +![detail.png](../../images/ui/detail.png) + +### Finished Jobs + +The "Finished Jobs" section displays all SeaTunnel jobs that have either successfully completed or failed. This section provides execution results, completion times, durations, and failure reasons (if any) for each job. Users can review past job records through this module to analyze job performance, troubleshoot issues, or rerun specific jobs as needed. +![finished.png](../../images/ui/finished.png) + +## Workers + +### Workers Information + +The "Workers" section displays detailed information about all worker nodes in the cluster, including each worker's address, running status, CPU and memory usage, number of tasks being executed, and more. Through this module, users can monitor the health of each worker node, promptly identify and address resource bottlenecks or node failures, ensuring the stable operation of the SeaTunnel cluster. +![workers.png](../../images/ui/workers.png) + +## Master + +### Master Information + +The "Master" section provides the status and configuration information of the master node in the SeaTunnel cluster. Users can view the master's address, running status, job scheduling responsibilities, and overall resource allocation within the cluster. This module helps users gain a comprehensive understanding of the cluster's core management components, facilitating cluster configuration optimization and troubleshooting. +![master.png](../../images/ui/master.png) diff --git a/docs/en/start-v2/docker/docker.md b/docs/en/start-v2/docker/docker.md index 8c3c620fb1c..2c2c7824f4f 100644 --- a/docs/en/start-v2/docker/docker.md +++ b/docs/en/start-v2/docker/docker.md @@ -40,7 +40,7 @@ You can download the source code from the [download page](https://seatunnel.apac ```shell cd seatunnel # Use already sett maven profile -sh ./mvnw -B clean install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dlicense.skipAddThirdParty=true -D"docker.build.skip"=false -D"docker.verify.skip"=false -D"docker.push.skip"=true -D"docker.tag"=2.3.8 -Dmaven.deploy.skip -D"skip.spotless"=true --no-snapshot-updates -Pdocker,seatunnel +sh ./mvnw -B clean install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dlicense.skipAddThirdParty=true -D"docker.build.skip"=false -D"docker.verify.skip"=false -D"docker.push.skip"=true -D"docker.tag"=2.3.9 -Dmaven.deploy.skip -D"skip.spotless"=true --no-snapshot-updates -Pdocker,seatunnel # Check the docker image docker images | grep apache/seatunnel @@ -53,10 +53,10 @@ sh ./mvnw clean package -DskipTests -Dskip.spotless=true # Build docker image cd seatunnel-dist -docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.8 -t apache/seatunnel:2.3.8 . +docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.9 -t apache/seatunnel:2.3.9 . # If you build from dev branch, you should add SNAPSHOT suffix to the version -docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.8-SNAPSHOT -t apache/seatunnel:2.3.8-SNAPSHOT . +docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.9-SNAPSHOT -t apache/seatunnel:2.3.9-SNAPSHOT . 
# Check the docker image docker images | grep apache/seatunnel diff --git a/docs/en/start-v2/kubernetes/kubernetes.mdx b/docs/en/start-v2/kubernetes/kubernetes.mdx index eb231850514..ce996c09b2a 100644 --- a/docs/en/start-v2/kubernetes/kubernetes.mdx +++ b/docs/en/start-v2/kubernetes/kubernetes.mdx @@ -44,7 +44,7 @@ To run the image with SeaTunnel, first create a `Dockerfile`: ```Dockerfile FROM flink:1.13 -ENV SEATUNNEL_VERSION="2.3.8" +ENV SEATUNNEL_VERSION="2.3.9" ENV SEATUNNEL_HOME="/opt/seatunnel" RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz @@ -56,13 +56,13 @@ RUN cd ${SEATUNNEL_HOME} && sh bin/install-plugin.sh ${SEATUNNEL_VERSION} Then run the following commands to build the image: ```bash -docker build -t seatunnel:2.3.8-flink-1.13 -f Dockerfile . +docker build -t seatunnel:2.3.9-flink-1.13 -f Dockerfile . ``` -Image `seatunnel:2.3.8-flink-1.13` needs to be present in the host (minikube) so that the deployment can take place. +Image `seatunnel:2.3.9-flink-1.13` needs to be present in the host (minikube) so that the deployment can take place. Load image to minikube via: ```bash -minikube image load seatunnel:2.3.8-flink-1.13 +minikube image load seatunnel:2.3.9-flink-1.13 ``` @@ -72,7 +72,7 @@ minikube image load seatunnel:2.3.8-flink-1.13 ```Dockerfile FROM openjdk:8 -ENV SEATUNNEL_VERSION="2.3.8" +ENV SEATUNNEL_VERSION="2.3.9" ENV SEATUNNEL_HOME="/opt/seatunnel" RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz @@ -84,13 +84,13 @@ RUN cd ${SEATUNNEL_HOME} && sh bin/install-plugin.sh ${SEATUNNEL_VERSION} Then run the following commands to build the image: ```bash -docker build -t seatunnel:2.3.8 -f Dockerfile . +docker build -t seatunnel:2.3.9 -f Dockerfile . ``` -Image `seatunnel:2.3.8` need to be present in the host (minikube) so that the deployment can take place. +Image `seatunnel:2.3.9` need to be present in the host (minikube) so that the deployment can take place. Load image to minikube via: ```bash -minikube image load seatunnel:2.3.8 +minikube image load seatunnel:2.3.9 ``` @@ -100,7 +100,7 @@ minikube image load seatunnel:2.3.8 ```Dockerfile FROM openjdk:8 -ENV SEATUNNEL_VERSION="2.3.8" +ENV SEATUNNEL_VERSION="2.3.9" ENV SEATUNNEL_HOME="/opt/seatunnel" RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz @@ -112,13 +112,13 @@ RUN cd ${SEATUNNEL_HOME} && sh bin/install-plugin.sh ${SEATUNNEL_VERSION} Then run the following commands to build the image: ```bash -docker build -t seatunnel:2.3.8 -f Dockerfile . +docker build -t seatunnel:2.3.9 -f Dockerfile . ``` -Image `seatunnel:2.3.8` needs to be present in the host (minikube) so that the deployment can take place. +Image `seatunnel:2.3.9` needs to be present in the host (minikube) so that the deployment can take place. 
Load image to minikube via: ```bash -minikube image load seatunnel:2.3.8 +minikube image load seatunnel:2.3.9 ``` @@ -191,7 +191,7 @@ none ]}> -In this guide we will use [seatunnel.streaming.conf](https://github.com/apache/seatunnel/blob/2.3.8-release/config/v2.streaming.conf.template): +In this guide we will use [seatunnel.streaming.conf](https://github.com/apache/seatunnel/blob/2.3.9-release/config/v2.streaming.conf.template): ```conf env { @@ -245,7 +245,7 @@ kind: FlinkDeployment metadata: name: seatunnel-flink-streaming-example spec: - image: seatunnel:2.3.8-flink-1.13 + image: seatunnel:2.3.9-flink-1.13 flinkVersion: v1_13 flinkConfiguration: taskmanager.numberOfTaskSlots: "2" @@ -291,7 +291,7 @@ kubectl apply -f seatunnel-flink.yaml -In this guide we will use [seatunnel.streaming.conf](https://github.com/apache/seatunnel/blob/2.3.8-release/config/v2.streaming.conf.template): +In this guide we will use [seatunnel.streaming.conf](https://github.com/apache/seatunnel/blob/2.3.9-release/config/v2.streaming.conf.template): ```conf env { @@ -334,7 +334,7 @@ metadata: spec: containers: - name: seatunnel - image: seatunnel:2.3.8 + image: seatunnel:2.3.9 command: ["/bin/sh","-c","/opt/seatunnel/bin/seatunnel.sh --config /data/seatunnel.streaming.conf -e local"] resources: limits: @@ -366,7 +366,7 @@ kubectl apply -f seatunnel.yaml -In this guide we will use [seatunnel.streaming.conf](https://github.com/apache/seatunnel/blob/2.3.8-release/config/v2.streaming.conf.template): +In this guide we will use [seatunnel.streaming.conf](https://github.com/apache/seatunnel/blob/2.3.9-release/config/v2.streaming.conf.template): ```conf env { @@ -524,7 +524,7 @@ spec: spec: containers: - name: seatunnel - image: seatunnel:2.3.8 + image: seatunnel:2.3.9 imagePullPolicy: IfNotPresent ports: - containerPort: 5801 diff --git a/docs/en/start-v2/locally/deployment.md b/docs/en/start-v2/locally/deployment.md index 8555c097f36..4684871acb0 100644 --- a/docs/en/start-v2/locally/deployment.md +++ b/docs/en/start-v2/locally/deployment.md @@ -22,7 +22,7 @@ Visit the [SeaTunnel Download Page](https://seatunnel.apache.org/download) to do Or you can also download it through the terminal: ```shell -export version="2.3.8" +export version="2.3.9" wget "https://archive.apache.org/dist/seatunnel/${version}/apache-seatunnel-${version}-bin.tar.gz" tar -xzvf "apache-seatunnel-${version}-bin.tar.gz" ``` @@ -35,10 +35,10 @@ Starting from version 2.2.0-beta, the binary package no longer provides connecto sh bin/install-plugin.sh ``` -If you need a specific connector version, taking 2.3.8 as an example, you need to execute the following command: +If you need a specific connector version, taking 2.3.9 as an example, you need to execute the following command: ```bash -sh bin/install-plugin.sh 2.3.8 +sh bin/install-plugin.sh 2.3.9 ``` Typically, you do not need all the connector plugins. You can specify the required plugins by configuring `config/plugin_config`. For example, if you want the sample application to work properly, you will need the `connector-console` and `connector-fake` plugins. 
You can modify the `plugin_config` configuration file as follows: @@ -71,7 +71,7 @@ You can download the source code from the [download page](https://seatunnel.apac cd seatunnel sh ./mvnw clean install -DskipTests -Dskip.spotless=true # get the binary package -cp seatunnel-dist/target/apache-seatunnel-2.3.8-bin.tar.gz /The-Path-You-Want-To-Copy +cp seatunnel-dist/target/apache-seatunnel-2.3.9-bin.tar.gz /The-Path-You-Want-To-Copy cd /The-Path-You-Want-To-Copy tar -xzvf "apache-seatunnel-${version}-bin.tar.gz" diff --git a/docs/en/transform-v2/sql-functions.md b/docs/en/transform-v2/sql-functions.md index 3438a24de9c..31a33989375 100644 --- a/docs/en/transform-v2/sql-functions.md +++ b/docs/en/transform-v2/sql-functions.md @@ -302,6 +302,14 @@ Example: REPLACE(NAME, ' ') +### SPLIT + +Split a string into an array. + +Example: + +select SPLIT(test,';') as arrays + ### SOUNDEX ```SOUNDEX(string)``` @@ -973,3 +981,37 @@ It is used to determine whether the condition is valid and return different valu Example: case when c_string in ('c_string') then 1 else 0 end + +### UUID + +```UUID()``` + +Generate a uuid through java function. + +Example: + +select UUID() as seatunnel_uuid + +### ARRAY + +Generate an array. + +Example: + +select ARRAY('test1','test2','test3') as arrays + + +### LATERAL VIEW +#### EXPLODE + +explode array column to rows. +OUTER EXPLODE will return NULL, while array is NULL or empty +EXPLODE(SPLIT(FIELD_NAME,separator))Used to split string type. The first parameter of SPLIT function is the field name, the second parameter is the separator +EXPLODE(ARRAY(value1,value2)) Used to custom array type. +``` +SELECT * FROM fake + LATERAL VIEW EXPLODE ( SPLIT ( NAME, ',' ) ) AS NAME + LATERAL VIEW EXPLODE ( SPLIT ( pk_id, ';' ) ) AS pk_id + LATERAL VIEW OUTER EXPLODE ( age ) AS age + LATERAL VIEW OUTER EXPLODE ( ARRAY(1,1) ) AS num +``` diff --git a/docs/images/icons/Apache Iceberg.svg b/docs/images/icons/Apache Iceberg.svg new file mode 100644 index 00000000000..d04e866a0f6 --- /dev/null +++ b/docs/images/icons/Apache Iceberg.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Doris.svg b/docs/images/icons/Doris.svg new file mode 100644 index 00000000000..2729c9a6985 --- /dev/null +++ b/docs/images/icons/Doris.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/FtpFile.svg b/docs/images/icons/FtpFile.svg new file mode 100644 index 00000000000..4cf14476e97 --- /dev/null +++ b/docs/images/icons/FtpFile.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Greenplum.svg b/docs/images/icons/Greenplum.svg new file mode 100644 index 00000000000..ead7dc6bfeb --- /dev/null +++ b/docs/images/icons/Greenplum.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git "a/docs/images/icons/Hdfs\346\226\207\344\273\266.svg" "b/docs/images/icons/Hdfs\346\226\207\344\273\266.svg" new file mode 100644 index 00000000000..7bc4a938f74 --- /dev/null +++ "b/docs/images/icons/Hdfs\346\226\207\344\273\266.svg" @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Hive.svg b/docs/images/icons/Hive.svg new file mode 100644 index 00000000000..70859e23b97 --- /dev/null +++ b/docs/images/icons/Hive.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/HiveJdbc.svg b/docs/images/icons/HiveJdbc.svg new file mode 100644 index 00000000000..70859e23b97 --- /dev/null +++ b/docs/images/icons/HiveJdbc.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Http.svg 
b/docs/images/icons/Http.svg new file mode 100644 index 00000000000..e9fcaf50aca --- /dev/null +++ b/docs/images/icons/Http.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/InfluxDB.svg b/docs/images/icons/InfluxDB.svg new file mode 100644 index 00000000000..a0bd1c639b6 --- /dev/null +++ b/docs/images/icons/InfluxDB.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/IoTDB.svg b/docs/images/icons/IoTDB.svg new file mode 100644 index 00000000000..1aad0988b75 --- /dev/null +++ b/docs/images/icons/IoTDB.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/JDBC.svg b/docs/images/icons/JDBC.svg new file mode 100644 index 00000000000..00365006920 --- /dev/null +++ b/docs/images/icons/JDBC.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Jira.svg b/docs/images/icons/Jira.svg new file mode 100644 index 00000000000..e49c6d768f9 --- /dev/null +++ b/docs/images/icons/Jira.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Kafka.png b/docs/images/icons/Kafka.png deleted file mode 100644 index a4b5359b866..00000000000 Binary files a/docs/images/icons/Kafka.png and /dev/null differ diff --git a/docs/images/icons/Kafka.svg b/docs/images/icons/Kafka.svg new file mode 100644 index 00000000000..094d598c4c2 --- /dev/null +++ b/docs/images/icons/Kafka.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Kingbase.svg b/docs/images/icons/Kingbase.svg new file mode 100644 index 00000000000..65a72ff2122 --- /dev/null +++ b/docs/images/icons/Kingbase.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Klaviyo.svg b/docs/images/icons/Klaviyo.svg new file mode 100644 index 00000000000..77f75c139fa --- /dev/null +++ b/docs/images/icons/Klaviyo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/LocalFile.svg b/docs/images/icons/LocalFile.svg new file mode 100644 index 00000000000..414c3dde3b9 --- /dev/null +++ b/docs/images/icons/LocalFile.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Maxcompute.svg b/docs/images/icons/Maxcompute.svg new file mode 100644 index 00000000000..dca95d03c36 --- /dev/null +++ b/docs/images/icons/Maxcompute.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Milvus.svg b/docs/images/icons/Milvus.svg new file mode 100644 index 00000000000..a057c16e418 --- /dev/null +++ b/docs/images/icons/Milvus.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/MySQL CDC.svg b/docs/images/icons/MySQL CDC.svg new file mode 100644 index 00000000000..92cca4e38d0 --- /dev/null +++ b/docs/images/icons/MySQL CDC.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Notion.svg b/docs/images/icons/Notion.svg new file mode 100644 index 00000000000..3c6e3b0f72f --- /dev/null +++ b/docs/images/icons/Notion.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/ObsFile.png b/docs/images/icons/ObsFile.png new file mode 100644 index 00000000000..be943c607ac Binary files /dev/null and b/docs/images/icons/ObsFile.png differ diff --git a/docs/images/icons/OceanBase.svg b/docs/images/icons/OceanBase.svg new file mode 100644 index 00000000000..e4589987ea6 --- /dev/null +++ b/docs/images/icons/OceanBase.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/OneSignal.svg b/docs/images/icons/OneSignal.svg new file mode 100644 index 
00000000000..8f0c26700da --- /dev/null +++ b/docs/images/icons/OneSignal.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/OpenMldb.png b/docs/images/icons/OpenMldb.png new file mode 100644 index 00000000000..b66e8dedef4 Binary files /dev/null and b/docs/images/icons/OpenMldb.png differ diff --git a/docs/images/icons/Oracle CDC.svg b/docs/images/icons/Oracle CDC.svg new file mode 100644 index 00000000000..9f739d77862 --- /dev/null +++ b/docs/images/icons/Oracle CDC.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Oracle.svg b/docs/images/icons/Oracle.svg new file mode 100644 index 00000000000..c4865624c3e --- /dev/null +++ b/docs/images/icons/Oracle.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Paimon.svg b/docs/images/icons/Paimon.svg new file mode 100644 index 00000000000..9dac157fdb6 --- /dev/null +++ b/docs/images/icons/Paimon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Persistiq.svg b/docs/images/icons/Persistiq.svg new file mode 100644 index 00000000000..2ab14f08a78 --- /dev/null +++ b/docs/images/icons/Persistiq.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Phoenix.svg b/docs/images/icons/Phoenix.svg new file mode 100644 index 00000000000..6fa6e48a403 --- /dev/null +++ b/docs/images/icons/Phoenix.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/PostgreSQL CDC.svg b/docs/images/icons/PostgreSQL CDC.svg new file mode 100644 index 00000000000..38547f16078 --- /dev/null +++ b/docs/images/icons/PostgreSQL CDC.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/PostgreSQL.svg b/docs/images/icons/PostgreSQL.svg new file mode 100644 index 00000000000..38547f16078 --- /dev/null +++ b/docs/images/icons/PostgreSQL.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Pulsar.svg b/docs/images/icons/Pulsar.svg new file mode 100644 index 00000000000..cabedf1e022 --- /dev/null +++ b/docs/images/icons/Pulsar.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Qdrant.svg b/docs/images/icons/Qdrant.svg new file mode 100644 index 00000000000..b431d111a6a --- /dev/null +++ b/docs/images/icons/Qdrant.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Rabbitmq.svg b/docs/images/icons/Rabbitmq.svg new file mode 100644 index 00000000000..a4ecbc6cfbf --- /dev/null +++ b/docs/images/icons/Rabbitmq.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Redis.svg b/docs/images/icons/Redis.svg new file mode 100644 index 00000000000..4cbd41cada9 --- /dev/null +++ b/docs/images/icons/Redis.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/RocketMQ.svg b/docs/images/icons/RocketMQ.svg new file mode 100644 index 00000000000..3fd2c1adba9 --- /dev/null +++ b/docs/images/icons/RocketMQ.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/S3File.svg b/docs/images/icons/S3File.svg new file mode 100644 index 00000000000..ddd50aeff00 --- /dev/null +++ b/docs/images/icons/S3File.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/SQL Server.svg b/docs/images/icons/SQL Server.svg new file mode 100644 index 00000000000..db4b76ca740 --- /dev/null +++ b/docs/images/icons/SQL Server.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Sftp.svg b/docs/images/icons/Sftp.svg new file mode 100644 index 
00000000000..2a8015eb504 --- /dev/null +++ b/docs/images/icons/Sftp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Snowflake.svg b/docs/images/icons/Snowflake.svg new file mode 100644 index 00000000000..fb4c2868fba --- /dev/null +++ b/docs/images/icons/Snowflake.svg @@ -0,0 +1,3 @@ + + + diff --git a/docs/images/icons/StarRocks.svg b/docs/images/icons/StarRocks.svg new file mode 100644 index 00000000000..10a52bbf355 --- /dev/null +++ b/docs/images/icons/StarRocks.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/TDengine.svg b/docs/images/icons/TDengine.svg new file mode 100644 index 00000000000..588347b3727 --- /dev/null +++ b/docs/images/icons/TDengine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Tablestore.svg b/docs/images/icons/Tablestore.svg new file mode 100644 index 00000000000..24526c988b9 --- /dev/null +++ b/docs/images/icons/Tablestore.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/images/icons/Typesense.png b/docs/images/icons/Typesense.png new file mode 100644 index 00000000000..f25cc7e9e71 Binary files /dev/null and b/docs/images/icons/Typesense.png differ diff --git a/docs/images/icons/Web3j.png b/docs/images/icons/Web3j.png new file mode 100644 index 00000000000..ec031cb3280 Binary files /dev/null and b/docs/images/icons/Web3j.png differ diff --git a/docs/images/ui/detail.png b/docs/images/ui/detail.png new file mode 100644 index 00000000000..a376b6e4880 Binary files /dev/null and b/docs/images/ui/detail.png differ diff --git a/docs/images/ui/finished.png b/docs/images/ui/finished.png new file mode 100644 index 00000000000..fa800bd6029 Binary files /dev/null and b/docs/images/ui/finished.png differ diff --git a/docs/images/ui/master.png b/docs/images/ui/master.png new file mode 100644 index 00000000000..5e42d2854ee Binary files /dev/null and b/docs/images/ui/master.png differ diff --git a/docs/images/ui/overview.png b/docs/images/ui/overview.png new file mode 100644 index 00000000000..67123532499 Binary files /dev/null and b/docs/images/ui/overview.png differ diff --git a/docs/images/ui/running.png b/docs/images/ui/running.png new file mode 100644 index 00000000000..889edb303b1 Binary files /dev/null and b/docs/images/ui/running.png differ diff --git a/docs/images/ui/workers.png b/docs/images/ui/workers.png new file mode 100644 index 00000000000..a2bf39ec218 Binary files /dev/null and b/docs/images/ui/workers.png differ diff --git a/docs/sidebars.js b/docs/sidebars.js index 6529ea291d5..1b213f3ff5c 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -206,7 +206,8 @@ const sidebars = { "seatunnel-engine/rest-api-v2", "seatunnel-engine/user-command", "seatunnel-engine/logging", - "seatunnel-engine/telemetry" + "seatunnel-engine/telemetry", + "seatunnel-engine/web-ui" ] }, { diff --git a/docs/zh/concept/schema-evolution.md b/docs/zh/concept/schema-evolution.md index 16f0f5dbc81..50f871f3e16 100644 --- a/docs/zh/concept/schema-evolution.md +++ b/docs/zh/concept/schema-evolution.md @@ -6,15 +6,19 @@ ### 源 [Mysql-CDC](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/MySQL-CDC.md) +[Oracle-CDC](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/Oracle-CDC.md) ### 目标 [Jdbc-Mysql](https://github.com/apache/seatunnel/blob/dev/docs/zh/connector-v2/sink/Jdbc.md) +[Jdbc-Oracle](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/sink/Jdbc.md) -注意: 目前模式演进不支持transform. 
+注意: 目前模式演进不支持transform。不同类型数据库(Oracle-CDC -> Jdbc-Mysql)的模式演进目前不支持ddl中列的默认值。 +当你使用Oracle-CDC时,你不能使用用户名`SYS`或`SYSTEM`来修改表结构,否则ddl事件将被过滤,这可能导致模式演进不起作用; +另外,如果你的表名以`ORA_TEMP_`开头,也会有相同的问题。 ## 启用Schema evolution功能 -在CDC源连接器中模式演进默认是关闭的。你需要在CDC连接器中配置`debezium.include.schema.changes = true`来启用它。 +在CDC源连接器中模式演进默认是关闭的。你需要在CDC连接器中配置`debezium.include.schema.changes = true`来启用它。当你使用Oracle-CDC并且启用schema-evolution时,你必须将`debezium`属性中的`log.mining.strategy`指定为`redo_log_catalog`。 ## 示例 @@ -57,3 +61,92 @@ sink { } } ``` + +### Oracle-cdc -> Jdbc-Oracle +``` +env { + # You can set engine configuration here + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + # This is a example source plugin **only for test and demonstrate the feature source plugin** + Oracle-CDC { + result_table_name = "customers" + username = "dbzuser" + password = "dbz" + database-names = ["ORCLCDB"] + schema-names = ["DEBEZIUM"] + table-names = ["ORCLCDB.DEBEZIUM.FULL_TYPES"] + base-url = "jdbc:oracle:thin:@oracle-host:1521/ORCLCDB" + source.reader.close.timeout = 120000 + connection.pool.size = 1 + debezium { + include.schema.changes = true + log.mining.strategy = redo_log_catalog + } + } +} + +sink { + Jdbc { + source_table_name = "customers" + driver = "oracle.jdbc.driver.OracleDriver" + url = "jdbc:oracle:thin:@oracle-host:1521/ORCLCDB" + user = "dbzuser" + password = "dbz" + generate_sink_sql = true + database = "ORCLCDB" + table = "DEBEZIUM.FULL_TYPES_SINK" + batch_size = 1 + primary_keys = ["ID"] + connection.pool.size = 1 + } +} +``` + +### Oracle-cdc -> Jdbc-Mysql +``` +env { + # You can set engine configuration here + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + # This is a example source plugin **only for test and demonstrate the feature source plugin** + Oracle-CDC { + result_table_name = "customers" + username = "dbzuser" + password = "dbz" + database-names = ["ORCLCDB"] + schema-names = ["DEBEZIUM"] + table-names = ["ORCLCDB.DEBEZIUM.FULL_TYPES"] + base-url = "jdbc:oracle:thin:@oracle-host:1521/ORCLCDB" + source.reader.close.timeout = 120000 + connection.pool.size = 1 + debezium { + include.schema.changes = true + log.mining.strategy = redo_log_catalog + } + } +} + +sink { + jdbc { + source_table_name = "customers" + url = "jdbc:mysql://oracle-host:3306/oracle_sink" + driver = "com.mysql.cj.jdbc.Driver" + user = "st_user_sink" + password = "mysqlpw" + generate_sink_sql = true + # You need to configure both database and table + database = oracle_sink + table = oracle_cdc_2_mysql_sink_table + primary_keys = ["ID"] + } +} +``` diff --git a/docs/zh/connector-v2/formats/protobuf.md b/docs/zh/connector-v2/formats/protobuf.md new file mode 100644 index 00000000000..68c4176fd6f --- /dev/null +++ b/docs/zh/connector-v2/formats/protobuf.md @@ -0,0 +1,164 @@ +# Protobuf 格式 + +Protobuf(Protocol Buffers)是一种由Google开发的语言中立、平台无关的数据序列化格式。它提供了一种高效的方式来编码结构化数据,同时支持多种编程语言和平台。 + +目前支持在 Kafka 中使用 protobuf 格式。 + +## Kafka 使用示例 + +- 模拟随机生成数据源,并以 protobuf 的格式 写入 kafka 的实例 + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + parallelism = 1 + result_table_name = "fake" + row.num = 16 + schema = { + fields { + c_int32 = int + c_int64 = long + c_float = float + c_double = double + c_bool = boolean + c_string = string + c_bytes = bytes + + Address { + city = string + state = string + street = string + } + attributes = "map" + phone_numbers = "array" + } + } + } +} + +sink { + kafka { + topic = "test_protobuf_topic_fake_source" + 
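+    # 示意注释(假设值): bootstrap.servers 指向示例环境的 Kafka 地址;
+    # format 设为 protobuf 时,protobuf_message_name 必须与下方 protobuf_schema 中定义的 message 名称一致(此处为 Person)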
bootstrap.servers = "kafkaCluster:9092" + format = protobuf + kafka.request.timeout.ms = 60000 + kafka.config = { + acks = "all" + request.timeout.ms = 60000 + buffer.memory = 33554432 + } + protobuf_message_name = Person + protobuf_schema = """ + syntax = "proto3"; + + package org.apache.seatunnel.format.protobuf; + + option java_outer_classname = "ProtobufE2E"; + + message Person { + int32 c_int32 = 1; + int64 c_int64 = 2; + float c_float = 3; + double c_double = 4; + bool c_bool = 5; + string c_string = 6; + bytes c_bytes = 7; + + message Address { + string street = 1; + string city = 2; + string state = 3; + string zip = 4; + } + + Address address = 8; + + map attributes = 9; + + repeated string phone_numbers = 10; + } + """ + } +} +``` + +- 从 kafka 读取 protobuf 格式的数据并打印到控制台的示例 + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + Kafka { + topic = "test_protobuf_topic_fake_source" + format = protobuf + protobuf_message_name = Person + protobuf_schema = """ + syntax = "proto3"; + + package org.apache.seatunnel.format.protobuf; + + option java_outer_classname = "ProtobufE2E"; + + message Person { + int32 c_int32 = 1; + int64 c_int64 = 2; + float c_float = 3; + double c_double = 4; + bool c_bool = 5; + string c_string = 6; + bytes c_bytes = 7; + + message Address { + string street = 1; + string city = 2; + string state = 3; + string zip = 4; + } + + Address address = 8; + + map attributes = 9; + + repeated string phone_numbers = 10; + } + """ + schema = { + fields { + c_int32 = int + c_int64 = long + c_float = float + c_double = double + c_bool = boolean + c_string = string + c_bytes = bytes + + Address { + city = string + state = string + street = string + } + attributes = "map" + phone_numbers = "array" + } + } + bootstrap.servers = "kafkaCluster:9092" + start_mode = "earliest" + result_table_name = "kafka_table" + } +} + +sink { + Console { + source_table_name = "kafka_table" + } +} +``` + diff --git a/docs/zh/connector-v2/sink/Hudi.md b/docs/zh/connector-v2/sink/Hudi.md index 2fbf0271358..7d8007f6b03 100644 --- a/docs/zh/connector-v2/sink/Hudi.md +++ b/docs/zh/connector-v2/sink/Hudi.md @@ -8,7 +8,7 @@ ## 主要特点 -- [x] [exactly-once](../../concept/connector-v2-features.md) +- [ ] [exactly-once](../../concept/connector-v2-features.md) - [x] [cdc](../../concept/connector-v2-features.md) - [x] [support multiple table write](../../concept/connector-v2-features.md) @@ -21,7 +21,6 @@ | table_dfs_path | string | 是 | - | | conf_files_path | string | 否 | - | | table_list | string | 否 | - | -| auto_commit | boolean| 否 | true | | schema_save_mode | enum | 否 | CREATE_SCHEMA_WHEN_NOT_EXIST | | common-options | config | 否 | - | @@ -44,6 +43,7 @@ | index_type | enum | no | BLOOM | | index_class_name | string | no | - | | record_byte_size | Int | no | 1024 | +| cdc_enabled | boolean| no | false | 注意: 当此配置对应于单个表时,您可以将table_list中的配置项展平到外层。 @@ -115,9 +115,9 @@ `max_commits_to_keep` Hudi 表保留的最多提交数。 -### auto_commit [boolean] +### cdc_enabled [boolean] -`auto_commit` 是否自动提交. +`cdc_enabled` 是否持久化Hudi表的CDC变更日志。启用后,在必要时持久化更改数据,表可以作为CDC模式进行查询. 
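下面是一个启用该选项的最小配置示意(仅作演示;`table_dfs_path`、库表名均为假设值,实际字段请以上方选项表为准):

```hocon
sink {
  Hudi {
    table_dfs_path = "hdfs://nameservice1/tmp/hudi"
    database = "st"
    table_name = "test_table"
    cdc_enabled = true
  }
}
```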
### schema_save_mode [Enum] diff --git a/docs/zh/connector-v2/sink/Jdbc.md b/docs/zh/connector-v2/sink/Jdbc.md index e1ab422952e..4370af20026 100644 --- a/docs/zh/connector-v2/sink/Jdbc.md +++ b/docs/zh/connector-v2/sink/Jdbc.md @@ -216,26 +216,27 @@ Sink插件常用参数,请参考 [Sink常用选项](../sink-common-options.md) 附录参数仅提供参考 -| 数据源 | driver | url | xa_data_source_class_name | maven | -|------------|----------------------------------------------|--------------------------------------------------------------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------| -| MySQL | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | com.mysql.cj.jdbc.MysqlXADataSource | https://mvnrepository.com/artifact/mysql/mysql-connector-java | -| PostgreSQL | org.postgresql.Driver | jdbc:postgresql://localhost:5432/postgres | org.postgresql.xa.PGXADataSource | https://mvnrepository.com/artifact/org.postgresql/postgresql | -| DM | dm.jdbc.driver.DmDriver | jdbc:dm://localhost:5236 | dm.jdbc.driver.DmdbXADataSource | https://mvnrepository.com/artifact/com.dameng/DmJdbcDriver18 | -| Phoenix | org.apache.phoenix.queryserver.client.Driver | jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF | / | https://mvnrepository.com/artifact/com.aliyun.phoenix/ali-phoenix-shaded-thin-client | -| SQL Server | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433 | com.microsoft.sqlserver.jdbc.SQLServerXADataSource | https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc | -| Oracle | oracle.jdbc.OracleDriver | jdbc:oracle:thin:@localhost:1521/xepdb1 | oracle.jdbc.xa.OracleXADataSource | https://mvnrepository.com/artifact/com.oracle.database.jdbc/ojdbc8 | -| sqlite | org.sqlite.JDBC | jdbc:sqlite:test.db | / | https://mvnrepository.com/artifact/org.xerial/sqlite-jdbc | -| GBase8a | com.gbase.jdbc.Driver | jdbc:gbase://e2e_gbase8aDb:5258/test | / | https://cdn.gbase.cn/products/30/p5CiVwXBKQYIUGN8ecHvk/gbase-connector-java-9.5.0.7-build1-bin.jar | -| StarRocks | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | / | https://mvnrepository.com/artifact/mysql/mysql-connector-java | -| db2 | com.ibm.db2.jcc.DB2Driver | jdbc:db2://localhost:50000/testdb | com.ibm.db2.jcc.DB2XADataSource | https://mvnrepository.com/artifact/com.ibm.db2.jcc/db2jcc/db2jcc4 | -| saphana | com.sap.db.jdbc.Driver | jdbc:sap://localhost:39015 | / | https://mvnrepository.com/artifact/com.sap.cloud.db.jdbc/ngdbc | -| Doris | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | / | https://mvnrepository.com/artifact/mysql/mysql-connector-java | -| teradata | com.teradata.jdbc.TeraDriver | jdbc:teradata://localhost/DBS_PORT=1025,DATABASE=test | / | https://mvnrepository.com/artifact/com.teradata.jdbc/terajdbc | -| Redshift | com.amazon.redshift.jdbc42.Driver | jdbc:redshift://localhost:5439/testdb | com.amazon.redshift.xa.RedshiftXADataSource | https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42 | -| Snowflake | net.snowflake.client.jdbc.SnowflakeDriver | jdbc:snowflake://.snowflakecomputing.com | / | https://mvnrepository.com/artifact/net.snowflake/snowflake-jdbc | -| Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | / | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar | -| Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | / | 
https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar | -| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | / | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.11/oceanbase-client-2.4.11.jar | +| 数据源 | driver | url | xa_data_source_class_name | maven | +|------------|----------------------------------------------|--------------------------------------------------------------------|----------------------------------------------------|------------------------------------------------------------------------------------------------------| +| MySQL | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | com.mysql.cj.jdbc.MysqlXADataSource | https://mvnrepository.com/artifact/mysql/mysql-connector-java | +| PostgreSQL | org.postgresql.Driver | jdbc:postgresql://localhost:5432/postgres | org.postgresql.xa.PGXADataSource | https://mvnrepository.com/artifact/org.postgresql/postgresql | +| DM | dm.jdbc.driver.DmDriver | jdbc:dm://localhost:5236 | dm.jdbc.driver.DmdbXADataSource | https://mvnrepository.com/artifact/com.dameng/DmJdbcDriver18 | +| Phoenix | org.apache.phoenix.queryserver.client.Driver | jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF | / | https://mvnrepository.com/artifact/com.aliyun.phoenix/ali-phoenix-shaded-thin-client | +| SQL Server | com.microsoft.sqlserver.jdbc.SQLServerDriver | jdbc:sqlserver://localhost:1433 | com.microsoft.sqlserver.jdbc.SQLServerXADataSource | https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc | +| Oracle | oracle.jdbc.OracleDriver | jdbc:oracle:thin:@localhost:1521/xepdb1 | oracle.jdbc.xa.OracleXADataSource | https://mvnrepository.com/artifact/com.oracle.database.jdbc/ojdbc8 | +| sqlite | org.sqlite.JDBC | jdbc:sqlite:test.db | / | https://mvnrepository.com/artifact/org.xerial/sqlite-jdbc | +| GBase8a | com.gbase.jdbc.Driver | jdbc:gbase://e2e_gbase8aDb:5258/test | / | https://cdn.gbase.cn/products/30/p5CiVwXBKQYIUGN8ecHvk/gbase-connector-java-9.5.0.7-build1-bin.jar | +| StarRocks | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | / | https://mvnrepository.com/artifact/mysql/mysql-connector-java | +| db2 | com.ibm.db2.jcc.DB2Driver | jdbc:db2://localhost:50000/testdb | com.ibm.db2.jcc.DB2XADataSource | https://mvnrepository.com/artifact/com.ibm.db2.jcc/db2jcc/db2jcc4 | +| saphana | com.sap.db.jdbc.Driver | jdbc:sap://localhost:39015 | / | https://mvnrepository.com/artifact/com.sap.cloud.db.jdbc/ngdbc | +| Doris | com.mysql.cj.jdbc.Driver | jdbc:mysql://localhost:3306/test | / | https://mvnrepository.com/artifact/mysql/mysql-connector-java | +| teradata | com.teradata.jdbc.TeraDriver | jdbc:teradata://localhost/DBS_PORT=1025,DATABASE=test | / | https://mvnrepository.com/artifact/com.teradata.jdbc/terajdbc | +| Redshift | com.amazon.redshift.jdbc42.Driver | jdbc:redshift://localhost:5439/testdb | com.amazon.redshift.xa.RedshiftXADataSource | https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42 | +| Snowflake | net.snowflake.client.jdbc.SnowflakeDriver | jdbc:snowflake://.snowflakecomputing.com | / | https://mvnrepository.com/artifact/net.snowflake/snowflake-jdbc | +| Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | / | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar | +| Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | / | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar | +| OceanBase 
| com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | / | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.12/oceanbase-client-2.4.12.jar | +| opengauss | org.opengauss.Driver | jdbc:opengauss://localhost:5432/postgres | / | https://repo1.maven.org/maven2/org/opengauss/opengauss-jdbc/5.1.0-og/opengauss-jdbc-5.1.0-og.jar | ## 示例 diff --git a/docs/zh/connector-v2/sink/Paimon.md b/docs/zh/connector-v2/sink/Paimon.md index 32d35a5e958..375c8c90caf 100644 --- a/docs/zh/connector-v2/sink/Paimon.md +++ b/docs/zh/connector-v2/sink/Paimon.md @@ -30,30 +30,35 @@ libfb303-xxx.jar ## 连接器选项 -| 名称 | 类型 | 是否必须 | 默认值 | 描述 | -|-----------------------------|-------|----------|------------------------------|---------------------------------------------------------------------------------------------------| -| warehouse | 字符串 | 是 | - | Paimon warehouse路径 | -| catalog_type | 字符串 | 否 | filesystem | Paimon的catalog类型,目前支持filesystem和hive | -| catalog_uri | 字符串 | 否 | - | Paimon catalog的uri,仅当catalog_type为hive时需要配置 | -| database | 字符串 | 是 | - | 数据库名称 | -| table | 字符串 | 是 | - | 表名 | -| hdfs_site_path | 字符串 | 否 | - | hdfs-site.xml文件路径 | -| schema_save_mode | 枚举 | 否 | CREATE_SCHEMA_WHEN_NOT_EXIST | Schema保存模式 | -| data_save_mode | 枚举 | 否 | APPEND_DATA | 数据保存模式 | -| paimon.table.primary-keys | 字符串 | 否 | - | 主键字段列表,联合主键使用逗号分隔(注意:分区字段需要包含在主键字段中) | -| paimon.table.partition-keys | 字符串 | 否 | - | 分区字段列表,多字段使用逗号分隔 | -| paimon.table.write-props | Map | 否 | - | Paimon表初始化指定的属性, [参考](https://paimon.apache.org/docs/master/maintenance/configurations/#coreoptions) | -| paimon.hadoop.conf | Map | 否 | - | Hadoop配置文件属性信息 | -| paimon.hadoop.conf-path | 字符串 | 否 | - | Hadoop配置文件目录,用于加载'core-site.xml', 'hdfs-site.xml', 'hive-site.xml'文件配置 | +| 名称 | 类型 | 是否必须 | 默认值 | 描述 | +|-----------------------------|------|------|------------------------------|-------------------------------------------------------------------------------------------------------| +| warehouse | 字符串 | 是 | - | Paimon warehouse路径 | +| catalog_type | 字符串 | 否 | filesystem | Paimon的catalog类型,目前支持filesystem和hive | +| catalog_uri | 字符串 | 否 | - | Paimon catalog的uri,仅当catalog_type为hive时需要配置 | +| database | 字符串 | 是 | - | 数据库名称 | +| table | 字符串 | 是 | - | 表名 | +| hdfs_site_path | 字符串 | 否 | - | hdfs-site.xml文件路径 | +| schema_save_mode | 枚举 | 否 | CREATE_SCHEMA_WHEN_NOT_EXIST | Schema保存模式 | +| data_save_mode | 枚举 | 否 | APPEND_DATA | 数据保存模式 | +| paimon.table.primary-keys | 字符串 | 否 | - | 主键字段列表,联合主键使用逗号分隔(注意:分区字段需要包含在主键字段中) | +| paimon.table.partition-keys | 字符串 | 否 | - | 分区字段列表,多字段使用逗号分隔 | +| paimon.table.write-props | Map | 否 | - | Paimon表初始化指定的属性, [参考](https://paimon.apache.org/docs/master/maintenance/configurations/#coreoptions) | +| paimon.hadoop.conf | Map | 否 | - | Hadoop配置文件属性信息 | +| paimon.hadoop.conf-path | 字符串 | 否 | - | Hadoop配置文件目录,用于加载'core-site.xml', 'hdfs-site.xml', 'hive-site.xml'文件配置 | ## 更新日志 你必须配置`changelog-producer=input`来启用paimon表的changelog产生模式。如果你使用了paimon sink的自动建表功能,你可以在`paimon.table.write-props`中指定这个属性。 Paimon表的changelog产生模式有[四种](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/),分别是`none`、`input`、`lookup` 和 `full-compaction`。 -目前,我们只支持`none`和`input`模式。默认是`none`,这种模式将不会产生changelog文件。`input`模式将会在Paimon表下产生changelog文件。 +目前支持全部`changelog-producer`模式。默认是`none`模式。 -当你使用流模式去读paimon表的数据时,这两种模式将会产生[不同的结果](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/Paimon.md#changelog)。 +* [`none`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#none) +* 
[`input`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#input) +* [`lookup`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#lookup) +* [`full-compaction`](https://paimon.apache.org/docs/master/primary-key-table/changelog-producer/#full-compaction) +> 注意: + > 当你使用流模式去读paimon表的数据时,不同模式将会产生[不同的结果](https://github.com/apache/seatunnel/blob/dev/docs/en/connector-v2/source/Paimon.md#changelog)。 ## 示例 @@ -248,6 +253,37 @@ sink { } } ``` +#### 使用`changelog-producer`属性写入 + +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + Mysql-CDC { + base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role"] + } +} + +sink { + Paimon { + catalog_name = "seatunnel_test" + warehouse = "file:///tmp/seatunnel/paimon/hadoop-sink/" + database = "seatunnel" + table = "role" + paimon.table.write-props = { + changelog-producer = full-compaction + changelog-tmp-path = /tmp/paimon/changelog + } + } +} +``` ### 动态分桶paimon单表 diff --git a/docs/zh/connector-v2/sink/Prometheus.md b/docs/zh/connector-v2/sink/Prometheus.md new file mode 100644 index 00000000000..8e7c9989020 --- /dev/null +++ b/docs/zh/connector-v2/sink/Prometheus.md @@ -0,0 +1,101 @@ +# Prometheus + +> Prometheus 数据接收器 + +## 引擎支持 + +> Spark
+> Flink
+> SeaTunnel Zeta
+
+## 主要特性
+
+- [ ] [exactly-once](../../concept/connector-v2-features.md)
+- [ ] [cdc](../../concept/connector-v2-features.md)
+- [x] [support multiple table write](../../concept/connector-v2-features.md)
+
+## 描述
+
+接收Source端传入的数据,利用数据触发 web hooks。
+
+> 例如,来自上游的数据为 [`label: {"__name__": "test1"}, value: 1.23,time:2024-08-15T17:00:00`], 则body内容如下: `{"label":{"__name__": "test1"}, "value":"1.23","time":"2024-08-15T17:00:00"}`
+
+**Tips: Prometheus 数据接收器仅支持 `post json` 类型的 web hook,source 数据将被视为 webhook 中的 body 内容,并且不支持写入太久以前的数据**
+
+## 支持的数据源信息
+
+想使用 Prometheus 连接器,需要安装以下必要的依赖。可以通过运行 install-plugin.sh 脚本或者从 Maven 中央仓库下载这些依赖
+
+| 数据源 | 支持版本 | 依赖 |
+|------|-----------|------------------------------------------------------------------------------------------------------------------|
+| Http | universal | [Download](https://mvnrepository.com/artifact/org.apache.seatunnel/seatunnel-connectors-v2/connector-prometheus) |
+
+## 接收器选项
+
+| Name | Type | Required | Default | Description |
+|-----------------------------|--------|----------|---------|-------------------------------------------------------------------|
+| url | String | Yes | - | Http 请求链接 |
+| headers | Map | No | - | Http 标头 |
+| retry | Int | No | - | 如果http请求返回`IOException`时的最大重试次数 |
+| retry_backoff_multiplier_ms | Int | No | 100 | http请求失败时,重试回退时间(毫秒)的乘数 |
+| retry_backoff_max_ms | Int | No | 10000 | http请求失败时,最大重试回退时间(毫秒) |
+| connect_timeout_ms | Int | No | 12000 | 连接超时设置,默认12s |
+| socket_timeout_ms | Int | No | 60000 | 套接字超时设置,默认为60s |
+| key_timestamp | Int | No | - | prometheus时间戳的key |
+| key_label | String | Yes | - | prometheus标签的key |
+| key_value | Double | Yes | - | prometheus值的key |
+| batch_size | Int | No | 1024 | prometheus批量写入大小 |
+| flush_interval | Long | No | 300000L | prometheus定时写入间隔(毫秒) |
+| common-options | | No | - | Sink插件常用参数,请参考 [Sink常用选项](../sink-common-options.md) 了解详情 |
+
+## 示例
+
+简单示例:
+
+```hocon
+env {
+  parallelism = 1
+  job.mode = "BATCH"
+}
+
+source {
+  FakeSource {
+    schema = {
+      fields {
+        c_map = "map"
+        c_double = double
+        c_timestamp = timestamp
+      }
+    }
+    result_table_name = "fake"
+    rows = [
+      {
+        kind = INSERT
+        fields = [{"__name__": "test1"}, 1.23, "2024-08-15T17:00:00"]
+      },
+      {
+        kind = INSERT
+        fields = [{"__name__": "test2"}, 1.23, "2024-08-15T17:00:00"]
+      }
+    ]
+  }
+}
+
+
+sink {
+  Prometheus {
+    url = "http://prometheus:9090/api/v1/write"
+    key_label = "c_map"
+    key_value = "c_double"
+    key_timestamp = "c_timestamp"
+    batch_size = 1
+  }
+}
+```
+
+## Changelog
+
+### 2.3.8-beta 2024-08-22
+
+- 添加prometheus接收连接器
+
diff --git a/docs/zh/connector-v2/sink/Redis.md b/docs/zh/connector-v2/sink/Redis.md
index b47d9de9146..d4bb13cd888 100644
--- a/docs/zh/connector-v2/sink/Redis.md
+++ b/docs/zh/connector-v2/sink/Redis.md
@@ -12,20 +12,25 @@
 ## 选项
 
-| 名称 | 类型 | 是否必须 | 默认值 |
-|----------------|--------|---------------------|--------|
-| host | string | 是 | - |
-| port | int | 是 | - |
-| key | string | 是 | - |
-| data_type | string | 是 | - |
-| user | string | 否 | - |
-| auth | string | 否 | - |
-| db_num | int | 否 | 0 |
-| mode | string | 否 | single |
-| nodes | list | 当 mode=cluster 时为:是 | - |
-| format | string | 否 | json |
-| expire | long | 否 | -1 |
-| common-options | | 否 | - |
+| name | type | required | default value |
+|--------------------|---------|-----------------------|---------------|
+| host | string | yes | - |
+| port | int | yes | - |
+| key | string | yes | - |
+| data_type | string | yes | - |
+| batch_size | int | no | 10 |
+| user | string | no | - |
+| auth | 
string | no | - | +| db_num | int | no | 0 | +| mode | string | no | single | +| nodes | list | yes when mode=cluster | - | +| format | string | no | json | +| expire | long | no | -1 | +| support_custom_key | boolean | no | false | +| value_field | string | no | - | +| hash_key_field | string | no | - | +| hash_value_field | string | no | - | +| common-options | | no | - | ### host [string] @@ -48,13 +53,12 @@ Redis 端口 | 200 | 获取成功 | true | | 500 | 内部错误 | false | -如果将字段名称指定为 `code` 并将 data_type 设置为 `key`,将有两个数据写入 Redis: -1. `200 -> {code: 200, message: true, data: 获取成功}` -2. `500 -> {code: 500, message: false, data: 内部错误}` - -如果将字段名称指定为 `value` 并将 data_type 设置为 `key`,则由于上游数据的字段中没有 `value` 字段,将只有一个数据写入 Redis: - -1. `value -> {code: 500, message: false, data: 内部错误}` +如果将字段名称指定为 code 并将 data_type 设置为 key,将有两个数据写入 Redis: +1. `200 -> {code: 200, data: 获取成功, success: true}` +2. `500 -> {code: 500, data: 内部错误, success: false}` + +如果将字段名称指定为 value 并将 data_type 设置为 key,则由于上游数据的字段中没有 value 字段,将只有一个数据写入 Redis: +1. `value -> {code: 500, data: 内部错误, success: false}` 请参见 data_type 部分以了解具体的写入规则。 @@ -128,6 +132,59 @@ Redis 节点信息,在集群模式下使用,必须按如下格式: 设置 Redis 的过期时间,单位为秒。默认值为 -1,表示键不会自动过期。 +### support_custom_key [boolean] + +设置为true,表示启用自定义Key。 + +上游数据如下: + +| code | data | success | +|------|------|---------| +| 200 | 获取成功 | true | +| 500 | 内部错误 | false | + +可以使用`{`和`}`符号自定义Redis键名,`{}`中的字段名会被解析替换为上游数据中的某个字段值,例如:将字段名称指定为 `{code}` 并将 data_type 设置为 `key`,将有两个数据写入 Redis: +1. `200 -> {code: 200, data: 获取成功, success: true}` +2. `500 -> {code: 500, data: 内部错误, success: false}` + +Redis键名可以由固定部分和变化部分组成,通过Redis分组符号:连接,例如:将字段名称指定为 `code:{code}` 并将 data_type 设置为 `key`,将有两个数据写入 Redis: +1. `code:200 -> {code: 200, data: 获取成功, success: true}` +2. `code:500 -> {code: 500, data: 内部错误, success: false}` + +### value_field [string] + +要写入Redis的值的字段, `data_type` 支持 `key` `list` `set` `zset`. + +当你指定Redis键名字段`key`指定为 `value`,值字段`value_field`指定为`data`,并将`data_type`指定为`key`时, + +上游数据如下: + +| code | data | success | +|------|------|---------| +| 200 | 获取成功 | true | + +如下的数据会被写入Redis: +1. `value -> 获取成功` + +### hash_key_field [string] + +要写入Redis的hash键字段, `data_type` 支持 `hash` + +### hash_value_field [string] + +要写入Redis的hash值字段, `data_type` 支持 `hash` + +当你指定Redis键名字段`key`指定为 `value`,hash键字段`hash_key_field`指定为`data`,hash值字段`hash_value_field`指定为`success`,并将`data_type`指定为`hash`时, + +上游数据如下: + +| code | data | success | +|------|------|---------| +| 200 | 获取成功 | true | + +如下的数据会被写入Redis: +1. `value -> 获取成功 | true` + ### common options Sink 插件通用参数,请参考 [Sink Common Options](../sink-common-options.md) 获取详情 @@ -145,6 +202,43 @@ Redis { } ``` +自定义Key示例: + +```hocon +Redis { + host = localhost + port = 6379 + key = "name:{name}" + support_custom_key = true + data_type = key +} +``` + +自定义Value示例: + +```hocon +Redis { + host = localhost + port = 6379 + key = person + value_field = "name" + data_type = key +} +``` + +自定义HashKey和HashValue示例: + +```hocon +Redis { + host = localhost + port = 6379 + key = person + hash_key_field = "name" + hash_value_field = "age" + data_type = hash +} +``` + ## 更新日志 ### 2.2.0-beta 2022-09-26 diff --git a/docs/zh/connector-v2/source/Doris.md b/docs/zh/connector-v2/source/Doris.md new file mode 100644 index 00000000000..ba3549473a5 --- /dev/null +++ b/docs/zh/connector-v2/source/Doris.md @@ -0,0 +1,212 @@ +# Doris + +> Doris 源连接器 + +## 支持的引擎 + +> Spark
+> Flink
+> SeaTunnel Zeta
+ +## 主要功能 + +- [x] [批处理](../../concept/connector-v2-features.md) +- [ ] [流处理](../../concept/connector-v2-features.md) +- [ ] [精确一次](../../concept/connector-v2-features.md) +- [x] [列投影](../../concept/connector-v2-features.md) +- [x] [并行度](../../concept/connector-v2-features.md) +- [x] [支持用户自定义分片](../../concept/connector-v2-features.md) +- [x] [支持多表读](../../concept/connector-v2-features.md) + +## 描述 + +用于 Apache Doris 的源连接器。 + +## 支持的数据源信息 + +| 数据源 | 支持版本 | 驱动 | Url | Maven | +|------------|--------------------------------------|--------|-----|-------| +| Doris | 仅支持Doris2.0及以上版本. | - | - | - | + +## 数据类型映射 + +| Doris 数据类型 | SeaTunnel 数据类型 | +|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +| INT | INT | +| TINYINT | TINYINT | +| SMALLINT | SMALLINT | +| BIGINT | BIGINT | +| LARGEINT | STRING | +| BOOLEAN | BOOLEAN | +| DECIMAL | DECIMAL((Get the designated column's specified column size)+1,
(Gets the designated column's number of digits to right of the decimal point.))) | +| FLOAT | FLOAT | +| DOUBLE | DOUBLE | +| CHAR
VARCHAR
STRING
TEXT | STRING | +| DATE | DATE | +| DATETIME
DATETIME(p) | TIMESTAMP | +| ARRAY | ARRAY | + +## 源选项 + +基础配置: + +| 名称 | 类型 | 是否必须 | 默认值 | 描述 | +|----------------------------------|--------|----------|------------|-----------------------------------------------------------------------------------------------------| +| fenodes | string | yes | - | FE 地址, 格式:`"fe_host:fe_http_port"` | +| username | string | yes | - | 用户名 | +| password | string | yes | - | 密码 | +| doris.request.retries | int | no | 3 | 请求Doris FE的重试次数 | +| doris.request.read.timeout.ms | int | no | 30000 | | +| doris.request.connect.timeout.ms | int | no | 30000 | | +| query-port | string | no | 9030 | Doris查询端口 | +| doris.request.query.timeout.s | int | no | 3600 | Doris扫描数据的超时时间,单位秒 | +| table_list | string | 否 | - | 表清单 | + +表清单配置: + +| 名称 | 类型 | 是否必须 | 默认值 | 描述 | +|----------------------------------|--------|----------|------------|-----------------------------------------------------------------------------------------------------| +| database | string | yes | - | 数据库 | +| table | string | yes | - | 表名 | +| doris.read.field | string | no | - | 选择要读取的Doris表字段 | +| doris.filter.query | string | no | - | 数据过滤. 格式:"字段 = 值", 例如:doris.filter.query = "F_ID > 2" | +| doris.batch.size | int | no | 1024 | 每次能够从BE中读取到的最大行数 | +| doris.exec.mem.limit | long | no | 2147483648 | 单个be扫描请求可以使用的最大内存。默认内存为2G(2147483648) | + +注意: 当此配置对应于单个表时,您可以将table_list中的配置项展平到外层。 + +### 提示 + +> 不建议随意修改高级参数 + +## 例子 + +### 单表 +> 这是一个从doris读取数据后,输出到控制台的例子: + +``` +env { + parallelism = 2 + job.mode = "BATCH" +} +source{ + Doris { + fenodes = "doris_e2e:8030" + username = root + password = "" + database = "e2e_source" + table = "doris_e2e_table" + } +} + +transform { + # If you would like to get more information about how to configure seatunnel and see full list of transform plugins, + # please go to https://seatunnel.apache.org/docs/transform/sql +} + +sink { + Console {} +} +``` + +使用`doris.read.field`参数来选择需要读取的Doris表字段: + +``` +env { + parallelism = 2 + job.mode = "BATCH" +} +source{ + Doris { + fenodes = "doris_e2e:8030" + username = root + password = "" + database = "e2e_source" + table = "doris_e2e_table" + doris.read.field = "F_ID,F_INT,F_BIGINT,F_TINYINT,F_SMALLINT" + } +} + +transform { + # If you would like to get more information about how to configure seatunnel and see full list of transform plugins, + # please go to https://seatunnel.apache.org/docs/transform/sql +} + +sink { + Console {} +} +``` + +使用`doris.filter.query`来过滤数据,参数值将作为过滤条件直接传递到doris: + +``` +env { + parallelism = 2 + job.mode = "BATCH" +} +source{ + Doris { + fenodes = "doris_e2e:8030" + username = root + password = "" + database = "e2e_source" + table = "doris_e2e_table" + doris.filter.query = "F_ID > 2" + } +} + +transform { + # If you would like to get more information about how to configure seatunnel and see full list of transform plugins, + # please go to https://seatunnel.apache.org/docs/transform/sql +} + +sink { + Console {} +} +``` +### 多表 +``` +env{ + parallelism = 1 + job.mode = "BATCH" +} + +source{ + Doris { + fenodes = "xxxx:8030" + username = root + password = "" + table_list = [ + { + database = "st_source_0" + table = "doris_table_0" + doris.read.field = "F_ID,F_INT,F_BIGINT,F_TINYINT" + doris.filter.query = "F_ID >= 50" + }, + { + database = "st_source_1" + table = "doris_table_1" + } + ] + } +} + +transform {} + +sink{ + Doris { + fenodes = "xxxx:8030" + schema_save_mode = "RECREATE_SCHEMA" + username = root + password = "" + database = "st_sink" + table = "${table_name}" + sink.enable-2pc = "true" + 
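+      # 示意注释: 开启两阶段提交(enable-2pc)后,Doris 依据导入事务的 label 去重
+      # 下一行的 label-prefix 用于生成事务 label,应保证在集群内唯一,避免与其他作业冲突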
sink.label-prefix = "test_json" + doris.config = { + format="json" + read_json_by_line="true" + } + } +} +``` diff --git a/docs/zh/connector-v2/source/HdfsFile.md b/docs/zh/connector-v2/source/HdfsFile.md index 0f983a80bcf..9cd254ef808 100644 --- a/docs/zh/connector-v2/source/HdfsFile.md +++ b/docs/zh/connector-v2/source/HdfsFile.md @@ -39,7 +39,7 @@ ## 源选项 -| 名称 | 类型 | 是否必须 | 默认值 | 描述 | +| 名称 | 类型 | 是否必须 | 默认值 | 描述 | |---------------------------|---------|------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | path | string | 是 | - | 源文件路径。 | | file_format_type | string | 是 | - | 我们支持以下文件类型:`text` `json` `csv` `orc` `parquet` `excel`。请注意,最终文件名将以文件格式的后缀结束,文本文件的后缀是 `txt`。 | @@ -55,6 +55,7 @@ | kerberos_principal | string | 否 | - | kerberos 的 principal。 | | kerberos_keytab_path | string | 否 | - | kerberos 的 keytab 路径。 | | skip_header_row_number | long | 否 | 0 | 跳过前几行,但仅适用于 txt 和 csv。例如,设置如下:`skip_header_row_number = 2`。然后 Seatunnel 将跳过源文件中的前两行。 | +| file_filter_pattern | string | 否 | - | 过滤模式,用于过滤文件。 | | schema | config | 否 | - | 上游数据的模式字段。 | | sheet_name | string | 否 | - | 读取工作簿的表格,仅在文件格式为 excel 时使用。 | | compress_codec | string | 否 | none | 文件的压缩编解码器。 | @@ -64,6 +65,60 @@ **delimiter** 参数在版本 2.3.5 后将被弃用,请改用 **field_delimiter**。 +### file_filter_pattern [string] + +过滤模式,用于过滤文件。 + +这个过滤规则遵循正则表达式. 关于详情,请参考 https://en.wikipedia.org/wiki/Regular_expression 学习 + +这里是一些例子. + +文件清单: +``` +/data/seatunnel/20241001/report.txt +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +/data/seatunnel/20241012/logo.png +``` +匹配规则: + +**例子 1**: *匹配所有txt为后缀名的文件*,匹配正则为: +``` +/data/seatunnel/20241001/.*\.txt +``` +匹配的结果是: +``` +/data/seatunnel/20241001/report.txt +``` +**例子 2**: *匹配所有文件名以abc开头的文件*,匹配正则为: +``` +/data/seatunnel/20241002/abc.* +``` +匹配的结果是: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +``` +**例子 3**: *匹配所有文件名以abc开头,并且文件第四个字母是 h 或者 g 的文件*, 匹配正则为: +``` +/data/seatunnel/20241007/abc[h,g].* +``` +匹配的结果是: +``` +/data/seatunnel/20241007/abch202410.csv +``` +**例子 4**: *匹配所有文件夹第三级以 202410 开头并且文件后缀名是.csv的文件*, 匹配正则为: +``` +/data/seatunnel/202410\d*/.*\.csv +``` +匹配的结果是: +``` +/data/seatunnel/20241007/abch202410.csv +/data/seatunnel/20241002/abcg202410.csv +/data/seatunnel/20241005/old_data.csv +``` + ### compress_codec [string] 文件的压缩编解码器及支持的详细信息如下所示: @@ -125,3 +180,25 @@ sink { } ``` +### Filter File + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + HdfsFile { + path = "/apps/hive/demo/student" + file_format_type = "json" + fs.defaultFS = "hdfs://namenode001" + file_filter_pattern = "abc[DX]*.*" + } +} + +sink { + Console { + } +} +``` \ No newline at end of file diff --git a/docs/zh/connector-v2/source/Prometheus.md b/docs/zh/connector-v2/source/Prometheus.md new file mode 100644 index 00000000000..7618a6c1c6b --- /dev/null +++ b/docs/zh/connector-v2/source/Prometheus.md @@ -0,0 +1,152 @@ +# Prometheus + +> Prometheus 数据源连接器 + +## 描述 + +用于读取prometheus数据。 + +## 主要特性 + +- [x] [批处理](../../concept/connector-v2-features.md) +- [ ] [流处理](../../concept/connector-v2-features.md) +- [ ] [并行](../../concept/connector-v2-features.md) + +## 源选项 + +| 名称 | 类型 | 是否必填 | 默认值 | +|-----------------------------|---------|------|-----------------| +| url | String | Yes | - | +| query | 
String | Yes | - |
+| query_type | String | Yes | Instant |
+| content_field | String | Yes | $.data.result.* |
+| schema.fields | Config | Yes | - |
+| format | String | No | json |
+| params | Map | Yes | - |
+| poll_interval_millis | int | No | - |
+| retry | int | No | - |
+| retry_backoff_multiplier_ms | int | No | 100 |
+| retry_backoff_max_ms | int | No | 10000 |
+| enable_multi_lines | boolean | No | false |
+| common-options | config | No | |
+
+### url [String]
+
+http 请求路径。
+
+### query [String]
+
+Prometheus 表达式查询字符串
+
+### query_type [String]
+
+Instant/Range
+
+1. Instant : 简单指标的即时查询。
+2. Range : 查询一段时间范围内的指标数据。
+
+https://prometheus.io/docs/prometheus/latest/querying/api/
+
+### params [Map]
+
+http 请求参数
+
+### poll_interval_millis [int]
+
+流模式下请求HTTP API的间隔(毫秒)
+
+### retry [int]
+
+如果http请求返回`IOException`时的最大重试次数
+
+### retry_backoff_multiplier_ms [int]
+
+http请求失败时,重试回退时间(毫秒)的乘数
+
+### retry_backoff_max_ms [int]
+
+http请求失败时,最大重试回退时间(毫秒)
+
+### format [String]
+
+上游数据的格式,默认为json。
+
+### schema [Config]
+
+按照如下填写一个固定值
+
+```hocon
+    schema = {
+        fields {
+            metric = "map"
+            value = double
+            time = long
+        }
+    }
+```
+
+#### fields [Config]
+
+上游数据的模式字段
+
+### common options
+
+源插件常用参数,请参考[Source Common Options](../source-common-options.md) 了解详细信息
+
+## 示例
+
+### Instant:
+
+```hocon
+source {
+  Prometheus {
+    result_table_name = "http"
+    url = "http://mockserver:1080"
+    query = "up"
+    query_type = "Instant"
+    content_field = "$.data.result.*"
+    format = "json"
+    schema = {
+      fields {
+        metric = "map"
+        value = double
+        time = long
+      }
+    }
+  }
+}
+```
+
+### Range
+
+```hocon
+source {
+  Prometheus {
+    result_table_name = "http"
+    url = "http://mockserver:1080"
+    query = "up"
+    query_type = "Range"
+    content_field = "$.data.result.*"
+    format = "json"
+    start = "2024-07-22T20:10:30.781Z"
+    end = "2024-07-22T20:11:00.781Z"
+    step = "15s"
+    schema = {
+      fields {
+        metric = "map"
+        value = double
+        time = long
+      }
+    }
+  }
+}
+```
+
+## Changelog
+
+### next version
+
+- 添加Prometheus源连接器
+- 减少配置项
+
diff --git a/docs/zh/contribution/setup.md b/docs/zh/contribution/setup.md
index c00c3132c22..662663a4961 100644
--- a/docs/zh/contribution/setup.md
+++ b/docs/zh/contribution/setup.md
@@ -75,7 +75,7 @@ Apache SeaTunnel 使用 `Spotless` 来统一代码风格和格式检查。可以
 完成上面所有的工作后,环境搭建已经完成, 可以直接运行我们的示例了。 所有的示例在 `seatunnel-examples` 模块里, 你可以随意选择进行编译和调试,参考 [running or debugging it in IDEA](https://www.jetbrains.com/help/idea/run-debug-configuration.html)。
 
-我们使用 `seatunnel-examples/seatunnel-engine-examples/src/main/java/org/apache/seatunnel/example/engine/SeaTunnelEngineExample.java`
+我们使用 `seatunnel-examples/seatunnel-engine-examples/src/main/java/org/apache/seatunnel/example/engine/SeaTunnelEngineLocalExample.java`
 作为示例, 运行成功后的输出如下:
 
 ```log
diff --git a/docs/zh/seatunnel-engine/checkpoint-storage.md b/docs/zh/seatunnel-engine/checkpoint-storage.md
index 86165d5d3be..a60fdff5ae0 100644
--- a/docs/zh/seatunnel-engine/checkpoint-storage.md
+++ b/docs/zh/seatunnel-engine/checkpoint-storage.md
@@ -12,7 +12,7 @@ sidebar_position: 7
 SeaTunnel Engine支持以下检查点存储类型:
 
-- HDFS (OSS,S3,HDFS,LocalFile)
+- HDFS (OSS,COS,S3,HDFS,LocalFile)
 - LocalFile (本地),(已弃用: 使用HDFS(LocalFile)替代).
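例如,一个通过 HDFS(LocalFile) 方式把检查点写到本地磁盘的最小配置示意如下(路径为假设值,仅作演示):

```yaml
seatunnel:
  engine:
    checkpoint:
      storage:
        type: hdfs
        max-retained: 3
        plugin-config:
          storage.type: hdfs
          fs.defaultFS: file:///tmp/seatunnel/checkpoint/ # 本地路径需要有写权限
```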
我们使用微内核设计模式将检查点存储模块从引擎中分离出来。这允许用户实现他们自己的检查点存储模块。
@@ -71,6 +71,42 @@ seatunnel:
 阿里云OSS凭证提供程序实现见: [验证凭证提供](https://github.com/aliyun/aliyun-oss-java-sdk/tree/master/src/main/java/com/aliyun/oss/common/auth)
+#### COS
+
+腾讯云COS基于hdfs-file,所以你可以参考[Hadoop COS文档](https://hadoop.apache.org/docs/stable/hadoop-cos/cloud-storage/)来配置COS。
+
+除了与公共COS buckets交互之外,COS客户端需要与buckets交互所需的凭据。
+客户端支持多种身份验证机制,并且可以配置使用哪种机制及其使用顺序。也可以使用com.qcloud.cos.auth.COSCredentialsProvider的自定义实现。
+如果您使用SimpleCredentialsProvider(可以从腾讯云API密钥管理中获得),它们包括一个secretId和一个secretKey。
+您可以这样配置:
+
+```yaml
+seatunnel:
+  engine:
+    checkpoint:
+      interval: 6000
+      timeout: 7000
+      storage:
+        type: hdfs
+        max-retained: 3
+        plugin-config:
+          storage.type: cos
+          cos.bucket: cosn://your-bucket
+          fs.cosn.credentials.provider: org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider
+          fs.cosn.userinfo.secretId: your-secretId
+          fs.cosn.userinfo.secretKey: your-secretKey
+          fs.cosn.bucket.region: your-region
+```
+
+有关Hadoop Credential Provider API的更多信息,请参见: [Credential Provider API](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/CredentialProviderAPI.html)。
+
+腾讯云COS相关配置可参考:[Tencent Hadoop-COS文档](https://doc.fincloud.tencent.cn/tcloud/Storage/COS/846365/hadoop)
+
+使用前请将如下jar添加到lib目录下:
+- [hadoop-cos-3.4.1.jar](https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-cos/3.4.1)
+- [cos_api-bundle-5.6.69.jar](https://mvnrepository.com/artifact/com.qcloud/cos_api-bundle/5.6.69)
+- [hadoop-shaded-guava-1.1.1.jar](https://mvnrepository.com/artifact/org.apache.hadoop.thirdparty/hadoop-shaded-guava/1.1.1)
+
 #### S3
 
 S3基于hdfs-file,所以你可以参考[Hadoop s3文档](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html)来配置s3。
diff --git a/docs/zh/seatunnel-engine/download-seatunnel.md b/docs/zh/seatunnel-engine/download-seatunnel.md
index 18b8cc68db5..8d06a2e4f78 100644
--- a/docs/zh/seatunnel-engine/download-seatunnel.md
+++ b/docs/zh/seatunnel-engine/download-seatunnel.md
@@ -20,7 +20,7 @@ import TabItem from '@theme/TabItem';
 或者您也可以通过终端下载
 
 ```shell
-export version="2.3.8"
+export version="2.3.9"
 wget "https://archive.apache.org/dist/seatunnel/${version}/apache-seatunnel-${version}-bin.tar.gz"
 tar -xzvf "apache-seatunnel-${version}-bin.tar.gz"
 ```
@@ -30,13 +30,13 @@ tar -xzvf "apache-seatunnel-${version}-bin.tar.gz"
 从2.2.0-beta版本开始,二进制包不再默认提供连接器依赖,因此在第一次使用它时,您需要执行以下命令来安装连接器:(当然,您也可以从 [Apache Maven Repository](https://repo.maven.apache.org/maven2/org/apache/seatunnel/) 手动下载连接器,然后将其移动至`connectors/seatunnel`目录下)。
 
 ```bash
-sh bin/install-plugin.sh 2.3.8
+sh bin/install-plugin.sh 2.3.9
 ```
 
-如果您需要指定的连接器版本,以2.3.7为例,您需要执行如下命令
+如果您需要指定的连接器版本,以2.3.9为例,您需要执行如下命令
 
 ```bash
-sh bin/install-plugin.sh 2.3.8
+sh bin/install-plugin.sh 2.3.9
 ```
 
 通常您并不需要所有的连接器插件,所以您可以通过配置`config/plugin_config`来指定您所需要的插件,例如,您只需要`connector-console`插件,那么您可以修改`plugin_config`配置文件如下
diff --git a/docs/zh/seatunnel-engine/hybrid-cluster-deployment.md b/docs/zh/seatunnel-engine/hybrid-cluster-deployment.md
index ad783b82328..709259d72d0 100644
--- a/docs/zh/seatunnel-engine/hybrid-cluster-deployment.md
+++ b/docs/zh/seatunnel-engine/hybrid-cluster-deployment.md
@@ -136,6 +136,23 @@ seatunnel:
     classloader-cache-mode: true
 ```
+### 4.6 作业调度策略
+
+当资源不足时,作业调度策略可以配置为以下两种模式:
+
+1. `WAIT`:等待资源可用。
+2. `REJECT`:拒绝作业,默认值。
+
+示例
+
+```yaml
+seatunnel:
+  engine:
+    job-schedule-strategy: WAIT
+```
+
+当`dynamic-slot: true`时,`job-schedule-strategy: WAIT` 配置会失效,将被强制修改为`job-schedule-strategy: REJECT`,因为动态Slot模式下该参数没有意义,作业可以直接提交。
+
 ## 5.
配置 SeaTunnel Engine 网络服务 所有 SeaTunnel Engine 网络相关的配置都在 `hazelcast.yaml` 文件中. diff --git a/docs/zh/seatunnel-engine/logging.md b/docs/zh/seatunnel-engine/logging.md index 8f04eaa9117..f97ea572e8c 100644 --- a/docs/zh/seatunnel-engine/logging.md +++ b/docs/zh/seatunnel-engine/logging.md @@ -30,10 +30,10 @@ MDC 由 slf4j 传播到日志后端,后者通常会自动将其添加到日志 Log4j2 使用属性文件进行控制。 -SeaTunnel Engine 发行版在 `confing` 目录中附带以下 log4j 属性文件,如果启用了 Log4j2,则会自动使用这些文件: +SeaTunnel Engine 发行版在 `config` 目录中附带以下 log4j 属性文件,如果启用了 Log4j2,则会自动使用这些文件: -- `log4j2_client.properties`: 由命令行客户端使用 (e.g., `seatunnel.sh`) -- `log4j2.properties`: 由 SeaTunnel 引擎服务使用 (e.g., `seatunnel-cluster.sh`) +- `log4j2_client.properties`: 由命令行客户端使用 (例如, `seatunnel.sh`) +- `log4j2.properties`: 由 SeaTunnel 引擎服务使用 (例如, `seatunnel-cluster.sh`) 默认情况下,日志文件输出到 `logs` 目录。 @@ -80,6 +80,37 @@ appender.file.layout.pattern = [%X{ST-JID}] %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%- SeaTunnel Engine 自动集成了大多数 Log 桥接器,允许针对 Log4j1/Logback 类工作的现有应用程序继续工作。 +### REST-API方式查询日志 + +SeaTunnel 提供了一个 API,用于查询日志。 + +**使用样例:** +- 获取所有节点jobId为`733584788375666689`的日志信息:`http://localhost:8080/logs/733584788375666689` +- 获取所有节点日志列表:`http://localhost:8080/logs` +- 获取所有节点日志列表以JSON格式返回:`http://localhost:8080/logs?format=json` +- 获取日志文件内容:`http://localhost:8080/logs/job-898380162133917698.log` + +有关详细信息,请参阅 [REST-API](rest-api-v2.md)。 + +## SeaTunnel 日志配置 + +### 定时删除旧日志 + +SeaTunnel 支持定时删除旧日志文件,以避免磁盘空间不足。您可以在 `seatunnel.yml` 文件中添加以下配置: + +```yaml +seatunnel: + engine: + history-job-expire-minutes: 1440 + telemetry: + logs: + scheduled-deletion-enable: true +``` + +- `history-job-expire-minutes`: 设置历史作业和日志的保留时间(单位:分钟)。系统将在指定的时间后自动清除过期的作业信息和日志文件。 +- `scheduled-deletion-enable`: 启用定时清理功能,默认为 `true`。系统将在作业达到 `history-job-expire-minutes` 设置的过期时间后自动删除相关日志文件。关闭该功能后,日志将永久保留在磁盘上,需要用户自行管理,否则可能影响磁盘占用。建议根据需求合理配置。 + + ## 开发人员最佳实践 您可以通过调用 `org.slf4j.LoggerFactory#LoggerFactory.getLogger` 并以您的类的类作为参数来创建 SLF4J 记录器。 diff --git a/docs/zh/seatunnel-engine/rest-api-v1.md b/docs/zh/seatunnel-engine/rest-api-v1.md index 5154922ec07..5aa9f111dfa 100644 --- a/docs/zh/seatunnel-engine/rest-api-v1.md +++ b/docs/zh/seatunnel-engine/rest-api-v1.md @@ -119,10 +119,19 @@ network: }, "createTime": "", "jobDag": { - "vertices": [ + "jobId": "", + "envOptions": [], + "vertexInfoMap": [ + { + "vertexId": 1, + "type": "", + "vertexName": "", + "tablePaths": [ + "" + ] + } ], - "edges": [ - ] + "pipelineEdges": {} }, "pluginJarsUrls": [ ], @@ -160,6 +169,7 @@ network: "createTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -239,6 +249,7 @@ network: "createTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -305,6 +316,7 @@ network: "finishTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -316,7 +328,8 @@ network: } ], "pipelineEdges": {} - }, "metrics": "" + }, + "metrics": "" } ] ``` @@ -765,4 +778,74 @@ network: "message": "Invalid JSON format in request body." } ``` - \ No newline at end of file + + + +------------------------------------------------------------------------------------------ + +### 获取所有节点日志内容 + +
+ GET /hazelcast/rest/maps/logs/:jobId (返回日志列表。)
+
+#### 请求参数(在请求体的params字段中添加)
+
+> | 参数名称 | 是否必传 | 参数类型 | 参数描述 |
+> |----------------------|----------|--------|-----------------------------------|
+> | jobId | optional | string | job id |
+
+当`jobId`为空时,返回所有节点的日志信息,否则返回指定`jobId`在所有节点的日志列表。
+
+#### 响应
+
+返回请求节点的日志列表与内容
+
+#### 返回所有日志文件列表
+
+如果你想先查看日志列表,可以通过`GET`请求获取日志列表,`http://localhost:5801/hazelcast/rest/maps/logs?format=json`
+
+```json
+[
+  {
+    "node": "localhost:5801",
+    "logLink": "http://localhost:5801/hazelcast/rest/maps/logs/job-899485770241277953.log",
+    "logName": "job-899485770241277953.log"
+  },
+  {
+    "node": "localhost:5801",
+    "logLink": "http://localhost:5801/hazelcast/rest/maps/logs/job-899470314109468673.log",
+    "logName": "job-899470314109468673.log"
+  }
+]
+```
+
+当前支持的格式有`json`和`html`,默认为`html`。
+
+#### 例子
+
+获取所有节点jobId为`733584788375666689`的日志信息:`http://localhost:5801/hazelcast/rest/maps/logs/733584788375666689`
+获取所有节点日志列表:`http://localhost:5801/hazelcast/rest/maps/logs`
+获取所有节点日志列表以JSON格式返回:`http://localhost:5801/hazelcast/rest/maps/logs?format=json`
+获取日志文件内容:`http://localhost:5801/hazelcast/rest/maps/logs/job-898380162133917698.log`
+
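+下面是一个假设的调用示例(地址与 jobId 均为示意值),以 JSON 格式获取指定作业在各节点的日志列表:
+
+```shell
+curl "http://localhost:5801/hazelcast/rest/maps/logs/733584788375666689?format=json"
+```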
+ + +### 获取单节点日志内容 + +
+ GET /hazelcast/rest/maps/log (返回日志列表。) + +#### 响应 + +返回请求节点的日志列表 + +#### 例子 + +获取当前节点的日志列表:`http://localhost:5801/hazelcast/rest/maps/log` +获取日志文件内容:`http://localhost:5801/hazelcast/rest/maps/log/job-898380162133917698.log` + +
diff --git a/docs/zh/seatunnel-engine/rest-api-v2.md b/docs/zh/seatunnel-engine/rest-api-v2.md index df884fa18ec..0ec9741d401 100644 --- a/docs/zh/seatunnel-engine/rest-api-v2.md +++ b/docs/zh/seatunnel-engine/rest-api-v2.md @@ -8,13 +8,19 @@ SeaTunnel有一个用于监控的API,可用于查询运行作业的状态和 ## 概述 -v2版本的api使用jetty支持,与v1版本的接口规范相同 ,可以通过修改`seatunnel.yaml`中的配置项来指定端口和context-path +v2版本的api使用jetty支持,与v1版本的接口规范相同 ,可以通过修改`seatunnel.yaml`中的配置项来指定端口和context-path, +同时可以配置 `enable-dynamic-port` 开启动态端口(默认从 `port` 开始累加),默认为关闭, +如果`enable-dynamic-port`为`true`,我们将使用`port`和`port`+`port-range`范围内未使用的端口,默认范围是100。 + ```yaml seatunnel: engine: - enable-http: true - port: 8080 + http: + enable-http: true + port: 8080 + enable-dynamic-port: false + port-range: 100 ``` 同时也可以配置context-path,配置如下: @@ -23,9 +29,10 @@ seatunnel: seatunnel: engine: - enable-http: true - port: 8080 - context-path: /seatunnel + http: + enable-http: true + port: 8080 + context-path: /seatunnel ``` ## API参考 @@ -80,14 +87,21 @@ seatunnel: "jobId": "", "jobName": "", "jobStatus": "", - "envOptions": { - }, "createTime": "", "jobDag": { - "vertices": [ + "jobId": "", + "envOptions": [], + "vertexInfoMap": [ + { + "vertexId": 1, + "type": "", + "vertexName": "", + "tablePaths": [ + "" + ] + } ], - "edges": [ - ] + "pipelineEdges": {} }, "pluginJarsUrls": [ ], @@ -125,6 +139,7 @@ seatunnel: "createTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -204,6 +219,7 @@ seatunnel: "createTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -270,6 +286,7 @@ seatunnel: "finishTime": "", "jobDag": { "jobId": "", + "envOptions": [], "vertexInfoMap": [ { "vertexId": 1, @@ -728,4 +745,75 @@ seatunnel: "message": "Invalid JSON format in request body." } ``` - \ No newline at end of file + + + +------------------------------------------------------------------------------------------ + +### 获取所有节点日志内容 + +
+ GET /logs/:jobId (返回日志列表。)
+
+#### 请求参数
+
+> | 参数名称 | 是否必传 | 参数类型 | 参数描述 |
+> |----------------------|----------|--------|-----------------------------------|
+> | jobId | optional | string | job id |
+
+当`jobId`为空时,返回所有节点的日志信息,否则返回指定`jobId`在所有节点的日志列表。
+
+#### 响应
+
+返回请求节点的日志列表或日志内容。
+
+#### 返回所有日志文件列表
+
+如果你想先查看日志列表,可以通过`GET`请求获取日志列表,`http://localhost:8080/logs?format=json`
+
+```json
+[
+  {
+    "node": "localhost:8080",
+    "logLink": "http://localhost:8080/logs/job-899485770241277953.log",
+    "logName": "job-899485770241277953.log"
+  },
+  {
+    "node": "localhost:8080",
+    "logLink": "http://localhost:8080/logs/job-899470314109468673.log",
+    "logName": "job-899470314109468673.log"
+  }
+]
+```
+
+当前支持的格式有`json`和`html`,默认为`html`。
+
+#### 例子
+
+获取所有节点jobId为`733584788375666689`的日志信息:`http://localhost:8080/logs/733584788375666689`
+获取所有节点日志列表:`http://localhost:8080/logs`
+获取所有节点日志列表以JSON格式返回:`http://localhost:8080/logs?format=json`
+获取日志文件内容:`http://localhost:8080/logs/job-898380162133917698.log`
+
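+下面是一个按 `jobId` 查询日志的最小示例(仅作演示;`jobId` 为假设值,假设服务端口为默认的 8080,需要 Java 11+):
+
+```java
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+
+public class JobLogQuery {
+    public static void main(String[] args) throws Exception {
+        String jobId = "733584788375666689"; // 假设的作业 ID
+        HttpClient client = HttpClient.newHttpClient();
+        HttpRequest request = HttpRequest.newBuilder()
+                .uri(URI.create("http://localhost:8080/logs/" + jobId))
+                .GET()
+                .build();
+        // 返回该作业在所有节点上的日志列表(默认 html 格式)
+        HttpResponse<String> response =
+                client.send(request, HttpResponse.BodyHandlers.ofString());
+        System.out.println(response.body());
+    }
+}
+```
+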
+ + +### 获取单节点日志内容 + +
+ GET /log (返回日志列表。)
+
+#### 响应
+
+返回请求节点的日志列表
+
+#### 例子
+
+获取当前节点的日志列表:`http://localhost:8080/log`
+获取日志文件内容:`http://localhost:8080/log/job-898380162133917698.log`
+
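+下面是一个获取当前节点日志列表并粗略提取日志文件名的最小示例(仅作演示;端口与正则均为假设值,需要 Java 11+):
+
+```java
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class CurrentNodeLogs {
+    public static void main(String[] args) throws Exception {
+        HttpClient client = HttpClient.newHttpClient();
+        HttpResponse<String> response = client.send(
+                HttpRequest.newBuilder(URI.create("http://localhost:8080/log")).build(),
+                HttpResponse.BodyHandlers.ofString());
+        // 默认返回 HTML,这里用正则粗略匹配形如 job-<id>.log 的文件名
+        Matcher m = Pattern.compile("job-\\d+\\.log").matcher(response.body());
+        while (m.find()) {
+            System.out.println(m.group());
+        }
+    }
+}
+```
+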
diff --git a/docs/zh/seatunnel-engine/separated-cluster-deployment.md b/docs/zh/seatunnel-engine/separated-cluster-deployment.md
index d28ec3601c3..dbe4865272e 100644
--- a/docs/zh/seatunnel-engine/separated-cluster-deployment.md
+++ b/docs/zh/seatunnel-engine/separated-cluster-deployment.md
@@ -284,6 +284,23 @@ netty-common-4.1.89.Final.jar
seatunnel-hadoop3-3.1.4-uber.jar
```

+### 4.7 作业调度策略
+
+当资源不足时,作业调度策略可以配置为以下两种模式:
+
+1. `WAIT`:等待资源可用。
+2. `REJECT`:拒绝作业,默认值。
+
+示例
+
+```yaml
+seatunnel:
+  engine:
+    job-schedule-strategy: WAIT
+```
+
+当`dynamic-slot: true`时,`job-schedule-strategy: WAIT` 配置会失效,将被强制修改为`job-schedule-strategy: REJECT`,因为动态 Slot 下该参数没有意义,作业可以直接提交。
+
## 5. 配置 SeaTunnel Engine 网络服务

所有 SeaTunnel Engine 网络相关的配置都在 `hazelcast-master.yaml`和`hazelcast-worker.yaml` 文件中.
diff --git a/docs/zh/seatunnel-engine/telemetry/grafana-dashboard.json b/docs/zh/seatunnel-engine/telemetry/grafana-dashboard.json
index e69de29bb2d..7a87e47ff38 100644
--- a/docs/zh/seatunnel-engine/telemetry/grafana-dashboard.json
+++ b/docs/zh/seatunnel-engine/telemetry/grafana-dashboard.json
@@ -0,0 +1 @@
+{"annotations":{"list":[{"builtIn":1,"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"enable":true,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","name":"Annotations & Alerts","target":{"limit":100,"matchAny":false,"tags":[],"type":"dashboard"},"type":"dashboard"}]},"editable":true,"fiscalYearStartMonth":0,"graphTooltip":0,"id":8,"links":[],"liveNow":false,"panels":[{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"thresholds"},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"super-light-blue","value":null},{"color":"red","value":100000}]}},"overrides":[]},"gridPos":{"h":4,"w":12,"x":0,"y":0},"id":17,"options":{"colorMode":"background","graphMode":"none","justifyMode":"center","orientation":"auto","percentChangeColorMode":"standard","reduceOptions":{"calcs":["lastNotNull"],"fields":"","values":false},"showPercentChange":false,"textMode":"auto","wideLayout":true},"pluginVersion":"11.2.0","targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"node_count{instance=~\"$instance\"}","interval":"","legendFormat":"","range":true,"refId":"A"}],"title":"Total Node Count","type":"stat"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"thresholds"},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":4,"w":12,"x":12,"y":0},"id":18,"options":{"colorMode":"background","graphMode":"none","justifyMode":"auto","orientation":"auto","percentChangeColorMode":"standard","reduceOptions":{"calcs":["lastNotNull"],"fields":"","values":false},"showPercentChange":false,"text":{"titleSize":1},"textMode":"auto","wideLayout":true},"pluginVersion":"11.2.0","targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"sum(node_state{instance=~\"$instance\"})","interval":"","legendFormat":"__auto","range":true,"refId":"A"}],"title":"UP Node Count","type":"stat"},{"collapsed":false,"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"gridPos":{"h":1,"w":24,"x":0,"y":4},"id":22,"panels":[],"targets":[{"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"refId":"A"}],"title":"Hazelcast 
Partition","type":"row"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":7,"w":12,"x":0,"y":5},"id":32,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_partition_partitionCount{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}","range":true,"refId":"A"}],"title":"partitionCount","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"linear","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":7,"w":12,"x":12,"y":5},"id":33,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_partition_activePartition{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}","range":true,"refId":"A"}],"title":"activePartition","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":7,"w":12,"x":0,"y":12},"id":34,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"pluginVersion":"8.3.3","targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"co
de","exemplar":true,"expr":"hazelcast_partition_isClusterSafe{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}","range":true,"refId":"A"}],"title":"isClusterSafe","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":7,"w":12,"x":12,"y":12},"id":35,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_partition_isLocalMemberSafe{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}","range":true,"refId":"A"}],"title":"isLocalMemberSafe","type":"timeseries"},{"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"gridPos":{"h":1,"w":24,"x":0,"y":19},"id":20,"targets":[{"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"refId":"A"}],"title":"Hazelcast Executor","type":"row"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":6,"w":24,"x":0,"y":20},"id":24,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_executedCount{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"executedCount","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"
}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"id":26,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_isTerminated{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"isTerminated","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"id":25,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_isShutdown{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"isShutdown","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"id":28,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_poolSize{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"poolSize","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"point
Size":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"id":27,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_maxPoolSize{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"maxPoolSize","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"linear","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"id":30,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_queueRemainingCapacity{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"queueRemainingCapacity","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"linear","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"id":29,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"hazelcast_executor_queueSize{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"}],"title":"queueSize","type":"timeseries"},{"collapsed":false,"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"gridPos":{"h":1,"w":24,"x":0,"y":50},"id":7,"panels":[],"targets":[{"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"refId":"A"}],"title":"System","type":"row"},{"datasource":{"default":true,"type":"prometheus","uid":"ed
wo9tknxxgqof"},"description":"","fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":18,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineStyle":{"fill":"solid"},"lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":51},"id":9,"interval":"300","options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"rate(process_cpu_seconds_total{instance=~\"$instance\"}[$__interval])*100","interval":"","legendFormat":"{{instance}}","range":true,"refId":"A"}],"title":"Cpu Usage","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"description":"","fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":22,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":51},"id":10,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"100 * (jvm_memory_bytes_used{instance=~\"$instance\",area=\"heap\"} / jvm_memory_bytes_max{instance=~\"$instance\",area=\"heap\"})","interval":"","legendFormat":"{{instance}}","range":true,"refId":"A"}],"title":"Heap Memory 
Usage","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":59},"id":12,"interval":"300","options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"increase(jvm_gc_collection_seconds_count[$__interval])","interval":"","legendFormat":"{{instance}}-{{gc}}","range":true,"refId":"A"}],"title":"GC Count","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":59},"id":13,"interval":"300","options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"increase(jvm_gc_collection_seconds_sum{instance=~\"$instance\"}[$__interval])*1000","interval":"","legendFormat":"{{instance}}-{{gc}}","range":true,"refId":"A"}],"title":"GC Cost 
Time","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"linear","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":8,"w":24,"x":0,"y":67},"id":14,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"right","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"jvm_threads_current{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-current","range":true,"refId":"A"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"jvm_threads_daemon{instance=~\"$instance\"}","hide":false,"interval":"","legendFormat":"{{instance}}-daemon","range":true,"refId":"B"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"jvm_threads_peak{instance=~\"$instance\"}","hide":false,"interval":"","legendFormat":"{{instance}}-peak","range":true,"refId":"C"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"jvm_threads_deadlocked{instance=~\"$instance\"}","hide":false,"interval":"","legendFormat":"{{instance}}-deadlocked","range":true,"refId":"D"}],"title":"Jvm Threads","type":"timeseries"},{"collapsed":false,"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"gridPos":{"h":1,"w":24,"x":0,"y":75},"id":5,"panels":[],"targets":[{"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"refId":"A"}],"title":"Job","type":"row"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"continuous-YlBl"},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null}]}},"overrides":[]},"gridPos":{"h":6,"w":24,"x":0,"y":76},"id":2,"options":{"displayMode":"basic","maxVizHeight":300,"minVizHeight":16,"minVizWidth":8,"namePlacement":"auto","orientation":"vertical","reduceOptions":{"calcs":["lastNotNull"],"fields":"","values":false},"showUnfilled":true,"sizing":"auto","valueMode":"color"},"pluginVersion":"11.2.0","targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"sum(job_count) by (type) ","hide":false,"interval":"","legendFormat":"__auto","range":true,"refId":"A"}],"title":"Job 
Count","type":"bargauge"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":6,"w":12,"x":0,"y":82},"id":3,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"job_thread_pool_activeCount{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"job_thread_pool_corePoolSize{instance=~\"$instance\"}","hide":false,"interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"B"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"job_thread_pool_maximumPoolSize{instance=~\"$instance\"}","hide":true,"interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"C"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"job_thread_pool_poolSize{instance=~\"$instance\"}","hide":false,"interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"D"}],"title":"Job Thread Pool","type":"timeseries"},{"datasource":{"default":true,"type":"prometheus","uid":"edwo9tknxxgqof"},"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisBorderShow":false,"axisCenteredZero":false,"axisColorMode":"text","axisLabel":"","axisPlacement":"auto","barAlignment":0,"barWidthFactor":0.6,"drawStyle":"line","fillOpacity":12,"gradientMode":"opacity","hideFrom":{"legend":false,"tooltip":false,"viz":false},"insertNulls":false,"lineInterpolation":"smooth","lineWidth":1,"pointSize":1,"scaleDistribution":{"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{"group":"A","mode":"none"},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[{"color":"green","value":null},{"color":"red","value":80}]}},"overrides":[]},"gridPos":{"h":6,"w":12,"x":12,"y":82},"id":15,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom","showLegend":true},"tooltip":{"mode":"single","sort":"none"}},"targets":[{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"job_thread_pool_completedTask_total{instance=~\"$instance\"}","interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"A"},{"datasource":{"type":"prometheus","uid":"jUi2yaj4k"},"editorMode":"code","exemplar":true,"expr":"job_thread_pool_task_total{instance=~\"$instance\"}","hide":false,"interval":"","legendFormat":"{{instance}}-{{type}}","range":true,"refId":"B"}],"title":"Job Thread Pool 
Total","type":"timeseries"}],"refresh":"30s","schemaVersion":39,"tags":[],"templating":{"list":[{"current":{"selected":true,"text":["All"],"value":["$__all"]},"datasource":{"type":"prometheus","uid":"edwo9tknxxgqof"},"definition":"label_values(cluster_info,instance)","description":"instance","hide":0,"includeAll":true,"label":"","multi":true,"name":"instance","options":[],"query":{"qryType":5,"query":"label_values(cluster_info,instance)","refId":"PrometheusVariableQueryEditor-VariableQuery"},"refresh":1,"regex":"","skipUrlSync":false,"sort":0,"type":"query"}]},"time":{"from":"now-15m","to":"now"},"timepicker":{},"timezone":"","title":"Seatunnel","uid":"bdx1j097hmku8d","version":11,"weekStart":""} \ No newline at end of file diff --git a/docs/zh/seatunnel-engine/web-ui.md b/docs/zh/seatunnel-engine/web-ui.md new file mode 100644 index 00000000000..596ad271d9a --- /dev/null +++ b/docs/zh/seatunnel-engine/web-ui.md @@ -0,0 +1,47 @@ +# Apache SeaTunnel Web UI 文档 + +## 访问 + +在访问 web ui 之前我们需要开启 http rest api。首先需要在`seatunnel.yaml`配置文件中配置 + +``` +seatunnel: + engine: + http: + enable-http: true + port: 8080 +``` + +然后访问 `http://ip:8080/#/overview` + +## 概述 + +Apache SeaTunnel 的 Web UI 提供了一个友好的用户界面,用于监控和管理 SeaTunnel 作业。通过 Web UI,用户可以实时查看当前运行的作业、已完成的作业,以及集群中工作节点和管理节点的状态。主要功能模块包括 Jobs、Workers 和 Master,每个模块都提供了详细的状态信息和操作选项,帮助用户高效地管理和优化其数据处理流程。 +![overview.png](../../images/ui/overview.png) + +## 作业 + +### 运行中的作业 + +“运行中的作业”模块列出了当前正在执行的所有 SeaTunnel 作业。用户可以查看每个作业的基本信息,包括作业 ID、提交时间、状态、执行时间等。点击具体作业可以查看更多详细信息,如任务分布、资源使用情况和日志输出,便于用户实时监控作业进度并及时处理潜在问题。 +![running.png](../../images/ui/running.png) +![detail.png](../../images/ui/detail.png) + +### 已完成的作业 + +“已完成的作业”模块展示了所有已成功完成或失败的 SeaTunnel 作业。此部分提供了每个作业的执行结果、完成时间、耗时以及失败原因(如果有)。用户可以通过此模块回顾过去的作业记录,分析作业性能,进行故障排查或重复执行某些特定作业。 +![finished.png](../../images/ui/finished.png) + +## 工作节点 + +### 工作节点信息 + +“工作节点”模块展示了集群中所有工作节点的详细信息,包括每个工作节点的地址、运行状态、CPU 和内存使用情况、正在执行的任务数量等。通过该模块,用户可以监控各个工作节点的健康状况,及时发现和处理资源瓶颈或节点故障,确保 SeaTunnel 集群的稳定运行。 +![workers.png](../../images/ui/workers.png) + +## 管理节点 + +### 管理节点信息 + +“管理节点”模块提供了 SeaTunnel 集群中主节点的状态和配置信息。用户可以查看 Master 节点的地址、运行状态、负责的作业调度情况以及整体集群的资源分配情况。该模块帮助用户全面了解集群的核心管理部分,便于进行集群配置优化和故障排查。 +![master.png](../../images/ui/master.png) diff --git a/docs/zh/start-v2/docker/docker.md b/docs/zh/start-v2/docker/docker.md index 548b318598d..1c4bc5d4b10 100644 --- a/docs/zh/start-v2/docker/docker.md +++ b/docs/zh/start-v2/docker/docker.md @@ -40,7 +40,7 @@ docker run --rm -it -v /tmp/job/:/config apache/seatunnel: ./bin/se ```shell cd seatunnel # Use already sett maven profile -mvn -B clean install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dlicense.skipAddThirdParty=true -D"docker.build.skip"=false -D"docker.verify.skip"=false -D"docker.push.skip"=true -D"docker.tag"=2.3.8 -Dmaven.deploy.skip -D"skip.spotless"=true --no-snapshot-updates -Pdocker,seatunnel +mvn -B clean install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dlicense.skipAddThirdParty=true -D"docker.build.skip"=false -D"docker.verify.skip"=false -D"docker.push.skip"=true -D"docker.tag"=2.3.9 -Dmaven.deploy.skip -D"skip.spotless"=true --no-snapshot-updates -Pdocker,seatunnel # Check the docker image docker images | grep apache/seatunnel @@ -53,10 +53,10 @@ mvn clean package -DskipTests -Dskip.spotless=true # Build docker image cd seatunnel-dist -docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.8 -t apache/seatunnel:2.3.8 . +docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.9 -t apache/seatunnel:2.3.9 . 
# If you build from dev branch, you should add SNAPSHOT suffix to the version -docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.8-SNAPSHOT -t apache/seatunnel:2.3.8-SNAPSHOT . +docker build -f src/main/docker/Dockerfile --build-arg VERSION=2.3.9-SNAPSHOT -t apache/seatunnel:2.3.9-SNAPSHOT . # Check the docker image docker images | grep apache/seatunnel diff --git a/docs/zh/start-v2/locally/deployment.md b/docs/zh/start-v2/locally/deployment.md index ce17e773319..927f5476ece 100644 --- a/docs/zh/start-v2/locally/deployment.md +++ b/docs/zh/start-v2/locally/deployment.md @@ -22,7 +22,7 @@ import TabItem from '@theme/TabItem'; 或者您也可以通过终端下载: ```shell -export version="2.3.8" +export version="2.3.9" wget "https://archive.apache.org/dist/seatunnel/${version}/apache-seatunnel-${version}-bin.tar.gz" tar -xzvf "apache-seatunnel-${version}-bin.tar.gz" ``` @@ -35,10 +35,10 @@ tar -xzvf "apache-seatunnel-${version}-bin.tar.gz" sh bin/install-plugin.sh ``` -如果您需要指定的连接器版本,以2.3.7为例,您需要执行如下命令: +如果您需要指定的连接器版本,以2.3.9为例,您需要执行如下命令: ```bash -sh bin/install-plugin.sh 2.3.8 +sh bin/install-plugin.sh 2.3.9 ``` 通常情况下,你不需要所有的连接器插件。你可以通过配置`config/plugin_config`来指定所需的插件。例如,如果你想让示例应用程序正常工作,你将需要`connector-console`和`connector-fake`插件。你可以修改`plugin_config`配置文件,如下所示: @@ -71,7 +71,7 @@ connector-console cd seatunnel sh ./mvnw clean install -DskipTests -Dskip.spotless=true # 获取构建好的二进制包 -cp seatunnel-dist/target/apache-seatunnel-2.3.8-bin.tar.gz /The-Path-You-Want-To-Copy +cp seatunnel-dist/target/apache-seatunnel-2.3.9-bin.tar.gz /The-Path-You-Want-To-Copy cd /The-Path-You-Want-To-Copy tar -xzvf "apache-seatunnel-${version}-bin.tar.gz" diff --git a/docs/zh/transform-v2/sql-functions.md b/docs/zh/transform-v2/sql-functions.md index 57c440a39b3..7e3f8454e1d 100644 --- a/docs/zh/transform-v2/sql-functions.md +++ b/docs/zh/transform-v2/sql-functions.md @@ -302,6 +302,15 @@ REPEAT(NAME || ' ', 10) REPLACE(NAME, ' ') + +### SPLIT + +将字符串切分成数组。 + +示例: + +select SPLIT(test,';') as arrays + ### SOUNDEX ```SOUNDEX(string)``` @@ -964,3 +973,37 @@ from 示例: case when c_string in ('c_string') then 1 else 0 end + +### UUID + +```UUID()``` + +通过java函数生成uuid + +示例: + +select UUID() as seatunnel_uuid + + +### ARRAY + +生成一个数组。 + +示例: + +select ARRAY('test1','test2','test3') as arrays + +### LATERAL VIEW +#### EXPLODE + +将 array 列展开成多行。 +OUTER EXPLODE 当 array 为NULL或者为空时,返回NULL +EXPLODE(SPLIT(FIELD_NAME,separator))用来切分字符串类型,SPLIT 第一个参数是字段名,第二个参数是分隔符 +EXPLODE(ARRAY(value1,value2)) 用于自定义数组切分,在原有基础上生成一个新的字段。 +``` +SELECT * FROM fake + LATERAL VIEW EXPLODE ( SPLIT ( NAME, ',' ) ) AS NAME + LATERAL VIEW EXPLODE ( SPLIT ( pk_id, ';' ) ) AS pk_id + LATERAL VIEW OUTER EXPLODE ( age ) AS age + LATERAL VIEW OUTER EXPLODE ( ARRAY(1,1) ) AS num +``` diff --git a/plugin-mapping.properties b/plugin-mapping.properties index e314ef86613..82c941b70f6 100644 --- a/plugin-mapping.properties +++ b/plugin-mapping.properties @@ -132,6 +132,8 @@ seatunnel.sink.ObsFile = connector-file-obs seatunnel.source.Milvus = connector-milvus seatunnel.sink.Milvus = connector-milvus seatunnel.sink.ActiveMQ = connector-activemq +seatunnel.source.Prometheus = connector-prometheus +seatunnel.sink.Prometheus = connector-prometheus seatunnel.source.Qdrant = connector-qdrant seatunnel.sink.Qdrant = connector-qdrant seatunnel.source.Sls = connector-sls diff --git a/pom.xml b/pom.xml index e80dae90247..fdb967a1d18 100644 --- a/pom.xml +++ b/pom.xml @@ -56,7 +56,7 @@ - 2.3.8-SNAPSHOT + 2.3.9-SNAPSHOT 2.1.1 UTF-8 1.8 @@ -135,7 +135,7 @@ 2.0.0 1.17.6 2.29.0 - 4.5 + 4.9 
2.7.0
4.0.16
9.4.56.v20240826
@@ -158,6 +158,8 @@
0.16.0
true
+ 3.1.4
+
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java
index 4567e98cbfe..330580b980f 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java
@@ -62,8 +62,23 @@ default void applySchemaChange(SchemaChangeEvent event) throws IOException {}
     *
     * @return the commit info need to commit
     */
+    @Deprecated
    Optional prepareCommit() throws IOException;

+    /**
+     * Prepare the commit; will be called before {@link #snapshotState(long)}. If you
+     * need to use 2pc, you can return the commit info in this method, and receive the commit info
+     * in {@link SinkCommitter#commit(List)}. If this method fails (by throwing an exception),
+     * **only** the Spark engine will call {@link #abortPrepare()}.
+     *
+     * @param checkpointId checkpointId
+     * @return the commit info need to commit
+     * @throws IOException if it fails to prepare the commit
+     */
+    default Optional prepareCommit(long checkpointId) throws IOException {
+        return prepareCommit();
+    }
+
    /**
     * @return The writer's state.
     * @throws IOException if fail to snapshot writer's state.
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriter.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriter.java
index f01c3d65dcf..f5b30be5370 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriter.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriter.java
@@ -220,6 +220,11 @@ public List snapshotState(long checkpointId) throws IOException

    @Override
    public Optional prepareCommit() throws IOException {
+        return Optional.empty();
+    }
+
+    @Override
+    public Optional prepareCommit(long checkpointId) throws IOException {
        checkQueueRemain();
        subSinkErrorCheck();
        MultiTableCommitInfo multiTableCommitInfo =
@@ -238,7 +243,9 @@ public Optional prepareCommit() throws IOException {
                .entrySet()) {
            Optional commit;
            try {
-                commit = sinkWriterEntry.getValue().prepareCommit();
+                SinkWriter sinkWriter =
+                        sinkWriterEntry.getValue();
+                commit = sinkWriter.prepareCommit(checkpointId);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PhysicalColumn.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PhysicalColumn.java
index db9da1b2b75..2a425000222 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PhysicalColumn.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PhysicalColumn.java
@@ -215,11 +215,25 @@ public static PhysicalColumn of(
            String comment,
            String sourceType,
            Map options) {
+        return new PhysicalColumn(
+                name, dataType, columnLength, nullable, defaultValue, comment, sourceType, options);
+    }
+
+    public static PhysicalColumn of(
+            String name,
+            SeaTunnelDataType dataType,
+            Long columnLength,
+            Integer scale,
+            boolean nullable,
+            Object defaultValue,
+            String comment,
+            String sourceType,
+            Map options) {
        return new PhysicalColumn(
                name,
                dataType,
                columnLength,
-                null,
+                scale,
                nullable,
                defaultValue,
                comment,
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java
index 9454f6a5469..04590cae3af 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java
@@ -26,7 +26,7 @@

@Getter
@ToString(callSuper = true)
-public class AlterTableNameEvent extends AlterTableColumnEvent {
+public class AlterTableNameEvent extends AlterTableEvent {
    private final TableIdentifier newTableIdentifier;

    public AlterTableNameEvent(
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/handler/AlterTableEventHandler.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/handler/AlterTableEventHandler.java
index b0972ec68a0..32966df4087 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/handler/AlterTableEventHandler.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/handler/AlterTableEventHandler.java
@@ -154,10 +154,19 @@ private SeaTunnelRowType applyChangeColumn(
        String oldColumn = changeColumnEvent.getOldColumn();
        int oldColumnIndex = dataType.indexOf(oldColumn);

+        // A rename-column operation only carries the names of the old and new columns,
+        // so we fill in the data type from the old column, which stays the same.
+        SeaTunnelDataType fieldType = dataType.getFieldType(oldColumnIndex);
+        Column column = changeColumnEvent.getColumn();
+        if (column.getDataType() == null) {
+            column = column.copy(fieldType);
+        }
+
        return applyModifyColumn(
                dataType,
                oldColumnIndex,
-                changeColumnEvent.getColumn(),
+                column,
                changeColumnEvent.isFirst(),
                changeColumnEvent.getAfterColumn());
    }
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/CommonOptions.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/CommonOptions.java
new file mode 100644
index 00000000000..839d611132b
--- /dev/null
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/CommonOptions.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.api.table.type;
+
+import org.apache.seatunnel.api.table.catalog.Column;
+
+import lombok.Getter;
+
+/**
+ * Common option keys of SeaTunnel {@link Column#getOptions()} / {@link SeaTunnelRow#getOptions()}.
+ * Used to store some extra information of the column value.
+ */
+@Getter
+public enum CommonOptions {
+    /**
+     * The key of {@link Column#getOptions()} to specify the column value is a json format string.
+     */
+    JSON("Json"),
+    /** The key of {@link Column#getOptions()} to specify the column value is a metadata field.
*/ + METADATA("Metadata"), + /** + * The key of {@link SeaTunnelRow#getOptions()} to store the partition value of the row value. + */ + PARTITION("Partition"), + ; + + private final String name; + + CommonOptions(String name) { + this.name = name; + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java index 10a5b33a935..b6da4eea7be 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java @@ -20,6 +20,7 @@ import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -35,6 +36,8 @@ public final class SeaTunnelRow implements Serializable { private volatile int size; + private Map options; + public SeaTunnelRow(int arity) { this.fields = new Object[arity]; } @@ -55,6 +58,10 @@ public void setRowKind(RowKind rowKind) { this.rowKind = rowKind; } + public void setOptions(Map options) { + this.options = options; + } + public int getArity() { return fields.length; } @@ -67,6 +74,13 @@ public RowKind getRowKind() { return this.rowKind; } + public Map getOptions() { + if (options == null) { + options = new HashMap<>(); + } + return options; + } + public Object[] getFields() { return fields; } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelMultiRowTransform.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelMultiRowTransform.java new file mode 100644 index 00000000000..1f78e8be486 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelMultiRowTransform.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.seatunnel.api.transform; + +import java.util.List; + +public interface SeaTunnelMultiRowTransform extends SeaTunnelTransform { + + /** + * Transform input data to {@link this#getProducedCatalogTable().getSeaTunnelRowType()} types + * data. + * + * @param row the data need be transformed. + * @return transformed data. 
+ */ + List flatMap(T row); + + default T map(T row) { + throw new UnsupportedOperationException("Heads-up conversion is not supported"); + } +} diff --git a/seatunnel-api/src/test/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriterTest.java b/seatunnel-api/src/test/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriterTest.java index 66e0ff0d4ef..86722eb2466 100644 --- a/seatunnel-api/src/test/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriterTest.java +++ b/seatunnel-api/src/test/java/org/apache/seatunnel/api/sink/multitablesink/MultiTableSinkWriterTest.java @@ -57,7 +57,7 @@ public void testPrepareCommitState() throws IOException { DefaultSerializer defaultSerializer = new DefaultSerializer<>(); for (int i = 0; i < 100; i++) { - byte[] bytes = defaultSerializer.serialize(multiTableSinkWriter.prepareCommit().get()); + byte[] bytes = defaultSerializer.serialize(multiTableSinkWriter.prepareCommit(i).get()); defaultSerializer.deserialize(bytes); } } diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/Handover.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/Handover.java index 1686514a15b..3132d93a169 100644 --- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/Handover.java +++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/Handover.java @@ -30,7 +30,10 @@ public final class Handover implements Closeable { new LinkedBlockingQueue<>(DEFAULT_QUEUE_SIZE); private Throwable error; - public boolean isEmpty() { + public boolean isEmpty() throws Exception { + if (error != null) { + rethrowException(error, error.getMessage()); + } return blockingQueue.isEmpty(); } diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java index 95928d1e4cc..0ebdc341fac 100644 --- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java +++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java @@ -39,6 +39,8 @@ public class Common { + private static final String FLINK_YARN_APPLICATION_PATH = "runtime.tar.gz"; + private Common() { throw new IllegalStateException("Utility class"); } @@ -113,8 +115,10 @@ public static Path appRootDir() { } catch (URISyntaxException e) { throw new RuntimeException(e); } - } else if (DeployMode.CLUSTER == MODE || DeployMode.RUN_APPLICATION == MODE) { + } else if (DeployMode.CLUSTER == MODE) { return Paths.get(""); + } else if (DeployMode.RUN_APPLICATION == MODE) { + return Paths.get(FLINK_YARN_APPLICATION_PATH); } else { throw new IllegalStateException("deploy mode not support : " + MODE); } diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/FileUtils.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/FileUtils.java index bfdda942755..279c4bf4cad 100644 --- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/FileUtils.java +++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/FileUtils.java @@ -37,6 +37,7 @@ import java.nio.file.Paths; import java.util.Arrays; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -194,4 +195,31 @@ private static void deleteFiles(@NonNull File file) { throw CommonError.fileOperationFailed("SeaTunnel", "delete", file.toString(), e); } } + + public static List listFile(String dirPath) { + try { + File file = new File(dirPath); + if 
(file.isDirectory()) {
+                File[] files = file.listFiles();
+                if (files == null) {
+                    return null;
+                }
+                return Arrays.stream(files)
+                        .map(
+                                currFile -> {
+                                    if (currFile.isDirectory()) {
+                                        return null;
+                                    } else {
+                                        return Arrays.asList(currFile);
+                                    }
+                                })
+                        .filter(Objects::nonNull)
+                        .flatMap(List::stream)
+                        .collect(Collectors.toList());
+            }
+            return Arrays.asList(file);
+        } catch (Exception e) {
+            throw CommonError.fileOperationFailed("SeaTunnel", "list", dirPath, e);
+        }
+    }
}
diff --git a/seatunnel-common/src/test/java/org/apache/seatunnel/common/HandoverTest.java b/seatunnel-common/src/test/java/org/apache/seatunnel/common/HandoverTest.java
new file mode 100644
index 00000000000..199a2d4e723
--- /dev/null
+++ b/seatunnel-common/src/test/java/org/apache/seatunnel/common/HandoverTest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.common;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class HandoverTest {
+
+    @Test
+    public void testThrowExceptionWhenQueueIsEmpty() {
+        Handover handover = new Handover<>();
+        handover.reportError(new RuntimeException("test"));
+        Assertions.assertThrows(RuntimeException.class, handover::isEmpty);
+    }
+}
diff --git a/seatunnel-config/seatunnel-config-base/pom.xml b/seatunnel-config/seatunnel-config-base/pom.xml
index 6c75e35cbd0..5610cab85e5 100644
--- a/seatunnel-config/seatunnel-config-base/pom.xml
+++ b/seatunnel-config/seatunnel-config-base/pom.xml
@@ -69,11 +69,29 @@
com/typesafe/config/ConfigParseOptions.class
com/typesafe/config/ConfigMergeable.class
com/typesafe/config/impl/ConfigParser.class
+ com/typesafe/config/impl/ConfigParser$1.class
+ com/typesafe/config/impl/ConfigParser$ParseContext.class
com/typesafe/config/impl/ConfigNodePath.class
com/typesafe/config/impl/PathParser.class
+ com/typesafe/config/impl/PathParser$Element.class
com/typesafe/config/impl/Path.class
com/typesafe/config/impl/SimpleConfigObject.class
+ com/typesafe/config/impl/SimpleConfigObject$1.class
+ com/typesafe/config/impl/SimpleConfigObject$RenderComparator.class
+ com/typesafe/config/impl/SimpleConfigObject$ResolveModifier.class
com/typesafe/config/impl/PropertiesParser.class
+ com/typesafe/config/impl/PropertiesParser$1.class
+ com/typesafe/config/impl/ConfigImpl.class
+ com/typesafe/config/impl/ConfigImpl$1.class
+ com/typesafe/config/impl/ConfigImpl$ClasspathNameSource.class
+ com/typesafe/config/impl/ConfigImpl$ClasspathNameSourceWithClass.class
+ com/typesafe/config/impl/ConfigImpl$DebugHolder.class
+ com/typesafe/config/impl/ConfigImpl$DefaultIncluderHolder.class
+ com/typesafe/config/impl/ConfigImpl$EnvVariablesHolder.class
+ com/typesafe/config/impl/ConfigImpl$FileNameSource.class
+
com/typesafe/config/impl/ConfigImpl$LoaderCache.class + com/typesafe/config/impl/ConfigImpl$LoaderCacheHolder.class + com/typesafe/config/impl/ConfigImpl$SystemPropertiesHolder.class diff --git a/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/ConfigImpl.java b/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/ConfigImpl.java new file mode 100644 index 00000000000..f078897ed2d --- /dev/null +++ b/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/ConfigImpl.java @@ -0,0 +1,471 @@ +/* + * Copyright (C) 2011-2012 Typesafe Inc. + */ + +package org.apache.seatunnel.shade.com.typesafe.config.impl; + +import org.apache.seatunnel.shade.com.typesafe.config.Config; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigException; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigIncluder; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigMemorySize; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigObject; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigOrigin; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigParseOptions; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigParseable; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigValue; + +import java.io.File; +import java.lang.ref.WeakReference; +import java.net.URL; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Callable; + +/** + * Internal implementation detail, not ABI stable, do not touch. For use only by the {@link + * com.typesafe.config} package. + */ +public class ConfigImpl { + + private static class LoaderCache { + private Config currentSystemProperties; + private WeakReference<ClassLoader> currentLoader; + private Map<String, Config> cache; + + LoaderCache() { + this.currentSystemProperties = null; + this.currentLoader = new WeakReference<ClassLoader>(null); + this.cache = new LinkedHashMap<String, Config>(); + } + + // for now, caching as long as the loader remains the same, + // drop entire cache if it changes. + synchronized Config getOrElseUpdate( + ClassLoader loader, String key, Callable<Config> updater) { + if (loader != currentLoader.get()) { + // reset the cache if we start using a different loader + cache.clear(); + currentLoader = new WeakReference<ClassLoader>(loader); + } + + Config systemProperties = systemPropertiesAsConfig(); + if (systemProperties != currentSystemProperties) { + cache.clear(); + currentSystemProperties = systemProperties; + } + + Config config = cache.get(key); + if (config == null) { + try { + config = updater.call(); + } catch (RuntimeException e) { + throw e; // this will include ConfigException + } catch (Exception e) { + throw new ConfigException.Generic(e.getMessage(), e); + } + if (config == null) + throw new ConfigException.BugOrBroken("null config from cache updater"); + cache.put(key, config); + } + + return config; + } + } + + private static class LoaderCacheHolder { + static final LoaderCache cache = new LoaderCache(); + } + + public static Config computeCachedConfig( + ClassLoader loader, String key, Callable<Config> updater) { + LoaderCache cache; + try { + cache = LoaderCacheHolder.cache; + } catch (ExceptionInInitializerError e) { + throw ConfigImplUtil.extractInitializerError(e); + } + return cache.getOrElseUpdate(loader, key, updater); + } + + static class FileNameSource implements SimpleIncluder.NameSource { + @Override + public ConfigParseable nameToParseable(String name, ConfigParseOptions parseOptions) { + return Parseable.newFile(new File(name), parseOptions); + } + }; + + static class ClasspathNameSource implements SimpleIncluder.NameSource { + @Override + public ConfigParseable nameToParseable(String name, ConfigParseOptions parseOptions) { + return Parseable.newResources(name, parseOptions); + } + }; + + static class ClasspathNameSourceWithClass implements SimpleIncluder.NameSource { + private final Class<?> klass; + + public ClasspathNameSourceWithClass(Class<?> klass) { + this.klass = klass; + } + + @Override + public ConfigParseable nameToParseable(String name, ConfigParseOptions parseOptions) { + return Parseable.newResources(klass, name, parseOptions); + } + }; + + public static ConfigObject parseResourcesAnySyntax( + Class<?> klass, String resourceBasename, ConfigParseOptions baseOptions) { + SimpleIncluder.NameSource source = new ClasspathNameSourceWithClass(klass); + return SimpleIncluder.fromBasename(source, resourceBasename, baseOptions); + } + + public static ConfigObject parseResourcesAnySyntax( + String resourceBasename, ConfigParseOptions baseOptions) { + SimpleIncluder.NameSource source = new ClasspathNameSource(); + return SimpleIncluder.fromBasename(source, resourceBasename, baseOptions); + } + + public static ConfigObject parseFileAnySyntax(File basename, ConfigParseOptions baseOptions) { + SimpleIncluder.NameSource source = new FileNameSource(); + return SimpleIncluder.fromBasename(source, basename.getPath(), baseOptions); + } + + static AbstractConfigObject emptyObject(String originDescription) { + ConfigOrigin origin = + originDescription != null ?
SimpleConfigOrigin.newSimple(originDescription) : null; + return emptyObject(origin); + } + + public static Config emptyConfig(String originDescription) { + return emptyObject(originDescription).toConfig(); + } + + static AbstractConfigObject empty(ConfigOrigin origin) { + return emptyObject(origin); + } + + // default origin for values created with fromAnyRef and no origin specified + private static final ConfigOrigin defaultValueOrigin = + SimpleConfigOrigin.newSimple("hardcoded value"); + private static final ConfigBoolean defaultTrueValue = + new ConfigBoolean(defaultValueOrigin, true); + private static final ConfigBoolean defaultFalseValue = + new ConfigBoolean(defaultValueOrigin, false); + private static final ConfigNull defaultNullValue = new ConfigNull(defaultValueOrigin); + private static final SimpleConfigList defaultEmptyList = + new SimpleConfigList(defaultValueOrigin, Collections.emptyList()); + private static final SimpleConfigObject defaultEmptyObject = + SimpleConfigObject.empty(defaultValueOrigin); + + private static SimpleConfigList emptyList(ConfigOrigin origin) { + if (origin == null || origin == defaultValueOrigin) return defaultEmptyList; + else return new SimpleConfigList(origin, Collections.emptyList()); + } + + private static AbstractConfigObject emptyObject(ConfigOrigin origin) { + // we want null origin to go to SimpleConfigObject.empty() to get the + // origin "empty config" rather than "hardcoded value" + if (origin == defaultValueOrigin) return defaultEmptyObject; + else return SimpleConfigObject.empty(origin); + } + + private static ConfigOrigin valueOrigin(String originDescription) { + if (originDescription == null) return defaultValueOrigin; + else return SimpleConfigOrigin.newSimple(originDescription); + } + + public static ConfigValue fromAnyRef(Object object, String originDescription) { + ConfigOrigin origin = valueOrigin(originDescription); + return fromAnyRef(object, origin, FromMapMode.KEYS_ARE_KEYS); + } + + public static ConfigObject fromPathMap( + Map pathMap, String originDescription) { + ConfigOrigin origin = valueOrigin(originDescription); + return (ConfigObject) fromAnyRef(pathMap, origin, FromMapMode.KEYS_ARE_PATHS); + } + + static AbstractConfigValue fromAnyRef(Object object, ConfigOrigin origin, FromMapMode mapMode) { + if (origin == null) throw new ConfigException.BugOrBroken("origin not supposed to be null"); + + if (object == null) { + if (origin != defaultValueOrigin) return new ConfigNull(origin); + else return defaultNullValue; + } else if (object instanceof AbstractConfigValue) { + return (AbstractConfigValue) object; + } else if (object instanceof Boolean) { + if (origin != defaultValueOrigin) { + return new ConfigBoolean(origin, (Boolean) object); + } else if ((Boolean) object) { + return defaultTrueValue; + } else { + return defaultFalseValue; + } + } else if (object instanceof String) { + return new ConfigString.Quoted(origin, (String) object); + } else if (object instanceof Number) { + // here we always keep the same type that was passed to us, + // rather than figuring out if a Long would fit in an Int + // or a Double has no fractional part. i.e. deliberately + // not using ConfigNumber.newNumber() when we have a + // Double, Integer, or Long. 
+ if (object instanceof Double) { + return new ConfigDouble(origin, (Double) object, null); + } else if (object instanceof Integer) { + return new ConfigInt(origin, (Integer) object, null); + } else if (object instanceof Long) { + return new ConfigLong(origin, (Long) object, null); + } else { + return ConfigNumber.newNumber(origin, ((Number) object).doubleValue(), null); + } + } else if (object instanceof Duration) { + return new ConfigLong(origin, ((Duration) object).toMillis(), null); + } else if (object instanceof Map) { + if (((Map) object).isEmpty()) return emptyObject(origin); + + if (mapMode == FromMapMode.KEYS_ARE_KEYS) { + Map values = + new LinkedHashMap(); + for (Map.Entry entry : ((Map) object).entrySet()) { + Object key = entry.getKey(); + if (!(key instanceof String)) + throw new ConfigException.BugOrBroken( + "bug in method caller: not valid to create ConfigObject from map with non-String key: " + + key); + AbstractConfigValue value = fromAnyRef(entry.getValue(), origin, mapMode); + values.put((String) key, value); + } + + return new SimpleConfigObject(origin, values); + } else { + return PropertiesParser.fromPathMap(origin, (Map) object); + } + } else if (object instanceof Iterable) { + Iterator i = ((Iterable) object).iterator(); + if (!i.hasNext()) return emptyList(origin); + + List values = new ArrayList(); + while (i.hasNext()) { + AbstractConfigValue v = fromAnyRef(i.next(), origin, mapMode); + values.add(v); + } + + return new SimpleConfigList(origin, values); + } else if (object instanceof ConfigMemorySize) { + return new ConfigLong(origin, ((ConfigMemorySize) object).toBytes(), null); + } else { + throw new ConfigException.BugOrBroken( + "bug in method caller: not valid to create ConfigValue from: " + object); + } + } + + private static class DefaultIncluderHolder { + static final ConfigIncluder defaultIncluder = new SimpleIncluder(null); + } + + static ConfigIncluder defaultIncluder() { + try { + return DefaultIncluderHolder.defaultIncluder; + } catch (ExceptionInInitializerError e) { + throw ConfigImplUtil.extractInitializerError(e); + } + } + + private static Properties getSystemProperties() { + // Avoid ConcurrentModificationException due to parallel setting of system properties by + // copying properties + final Properties systemProperties = System.getProperties(); + final Properties systemPropertiesCopy = new Properties(); + synchronized (systemProperties) { + systemPropertiesCopy.putAll(systemProperties); + } + return systemPropertiesCopy; + } + + private static AbstractConfigObject loadSystemProperties() { + return (AbstractConfigObject) + Parseable.newProperties( + getSystemProperties(), + ConfigParseOptions.defaults() + .setOriginDescription("system properties")) + .parse(); + } + + private static class SystemPropertiesHolder { + // this isn't final due to the reloadSystemPropertiesConfig() hack below + static volatile AbstractConfigObject systemProperties = loadSystemProperties(); + } + + static AbstractConfigObject systemPropertiesAsConfigObject() { + try { + return SystemPropertiesHolder.systemProperties; + } catch (ExceptionInInitializerError e) { + throw ConfigImplUtil.extractInitializerError(e); + } + } + + public static Config systemPropertiesAsConfig() { + return systemPropertiesAsConfigObject().toConfig(); + } + + public static void reloadSystemPropertiesConfig() { + // ConfigFactory.invalidateCaches() relies on this having the side + // effect that it drops all caches + SystemPropertiesHolder.systemProperties = loadSystemProperties(); + } + + 
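The same structure repeats throughout this shaded ConfigImpl: each lazily loaded singleton (LoaderCacheHolder, DefaultIncluderHolder, SystemPropertiesHolder, and, below, EnvVariablesHolder and DebugHolder) hides behind a nested holder class so the JVM's class-initialization guarantees replace explicit locking, and the "reload" methods simply reassign a volatile field. A minimal self-contained sketch of the idiom follows; the class and field names are illustrative only and not part of this patch:

```java
import java.util.Properties;

// Sketch of the initialization-on-demand holder idiom used by ConfigImpl.
// "LazyProps" and "Holder" are illustrative names, not part of the patch.
public class LazyProps {

    private static Properties load() {
        // Defensive copy, mirroring getSystemProperties() above: copying under
        // the Properties lock avoids ConcurrentModificationException if another
        // thread calls System.setProperty() while we iterate.
        Properties live = System.getProperties();
        Properties copy = new Properties();
        synchronized (live) {
            copy.putAll(live);
        }
        return copy;
    }

    private static class Holder {
        // Initialized exactly once, on first access to Holder; volatile rather
        // than final so reload() can swap in a fresh snapshot, the same hack
        // reloadSystemPropertiesConfig() relies on above.
        static volatile Properties snapshot = load();
    }

    public static Properties get() {
        return Holder.snapshot; // first call triggers Holder's class init
    }

    public static void reload() {
        Holder.snapshot = load();
    }
}
```

Wrapping the first access in a try/catch of ExceptionInInitializerError, as the methods above do, lets a failed static initializer surface as a meaningful ConfigException instead of an opaque JVM error.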
private static AbstractConfigObject loadEnvVariables() { + return PropertiesParser.fromStringMap(newSimpleOrigin("env variables"), System.getenv()); + } + + private static class EnvVariablesHolder { + static volatile AbstractConfigObject envVariables = loadEnvVariables(); + } + + static AbstractConfigObject envVariablesAsConfigObject() { + try { + return EnvVariablesHolder.envVariables; + } catch (ExceptionInInitializerError e) { + throw ConfigImplUtil.extractInitializerError(e); + } + } + + public static Config envVariablesAsConfig() { + return envVariablesAsConfigObject().toConfig(); + } + + public static void reloadEnvVariablesConfig() { + // ConfigFactory.invalidateCaches() relies on this having the side + // effect that it drops all caches + EnvVariablesHolder.envVariables = loadEnvVariables(); + } + + public static Config defaultReference(final ClassLoader loader) { + return computeCachedConfig( + loader, + "defaultReference", + new Callable<Config>() { + @Override + public Config call() { + Config unresolvedResources = + Parseable.newResources( + "reference.conf", + ConfigParseOptions.defaults() + .setClassLoader(loader)) + .parse() + .toConfig(); + return systemPropertiesAsConfig() + .withFallback(unresolvedResources) + .resolve(); + } + }); + } + + private static class DebugHolder { + private static String LOADS = "loads"; + private static String SUBSTITUTIONS = "substitutions"; + + private static Map<String, Boolean> loadDiagnostics() { + Map<String, Boolean> result = new LinkedHashMap<String, Boolean>(); + result.put(LOADS, false); + result.put(SUBSTITUTIONS, false); + + // People do -Dconfig.trace=foo,bar to enable tracing of different things + String s = System.getProperty("config.trace"); + if (s == null) { + return result; + } else { + String[] keys = s.split(","); + for (String k : keys) { + if (k.equals(LOADS)) { + result.put(LOADS, true); + } else if (k.equals(SUBSTITUTIONS)) { + result.put(SUBSTITUTIONS, true); + } else { + System.err.println( + "config.trace property contains unknown trace topic '" + k + "'"); + } + } + return result; + } + } + + private static final Map<String, Boolean> diagnostics = loadDiagnostics(); + + private static final boolean traceLoadsEnabled = diagnostics.get(LOADS); + private static final boolean traceSubstitutionsEnabled = diagnostics.get(SUBSTITUTIONS); + + static boolean traceLoadsEnabled() { + return traceLoadsEnabled; + } + + static boolean traceSubstitutionsEnabled() { + return traceSubstitutionsEnabled; + } + } + + public static boolean traceLoadsEnabled() { + try { + return DebugHolder.traceLoadsEnabled(); + } catch (ExceptionInInitializerError e) { + throw ConfigImplUtil.extractInitializerError(e); + } + } + + public static boolean traceSubstitutionsEnabled() { + try { + return DebugHolder.traceSubstitutionsEnabled(); + } catch (ExceptionInInitializerError e) { + throw ConfigImplUtil.extractInitializerError(e); + } + } + + public static void trace(String message) { + System.err.println(message); + } + + public static void trace(int indentLevel, String message) { + while (indentLevel > 0) { + System.err.print(" "); + indentLevel -= 1; + } + System.err.println(message); + } + + // the basic idea here is to add the "what" and have a canonical + // toplevel error message. the "original" exception may however have extra + // detail about what happened. call this if you have a better "what" than + // further down on the stack. + static ConfigException.NotResolved improveNotResolved( + Path what, ConfigException.NotResolved original) { + String newMessage = + what.render() + + " has not been resolved, you need to call Config#resolve()," + + " see API docs for Config#resolve()"; + if (newMessage.equals(original.getMessage())) return original; + else return new ConfigException.NotResolved(newMessage, original); + } + + public static ConfigOrigin newSimpleOrigin(String description) { + if (description == null) { + return defaultValueOrigin; + } else { + return SimpleConfigOrigin.newSimple(description); + } + } + + public static ConfigOrigin newFileOrigin(String filename) { + return SimpleConfigOrigin.newFile(filename); + } + + public static ConfigOrigin newURLOrigin(URL url) { + return SimpleConfigOrigin.newURL(url); + } +} diff --git a/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/SimpleConfigObject.java b/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/SimpleConfigObject.java index 735df6829c9..b10148977b7 100644 --- a/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/SimpleConfigObject.java +++ b/seatunnel-config/seatunnel-config-shade/src/main/java/org/apache/seatunnel/shade/com/typesafe/config/impl/SimpleConfigObject.java @@ -20,6 +20,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -277,7 +278,7 @@ protected SimpleConfigObject mergedWithObject(AbstractConfigObject abstractFallb boolean changed = false; boolean allResolved = true; Map<String, AbstractConfigValue> merged = new LinkedHashMap<>(); - Set<String> allKeys = new HashSet<>(); + Set<String> allKeys = new LinkedHashSet<>(); allKeys.addAll(this.keySet()); allKeys.addAll(fallback.keySet()); @@ -386,8 +387,7 @@ ResolveResult<? extends AbstractConfigObject> resolveSubstitutions( ResolveSource sourceWithParent = source.pushParent(this); try { - SimpleConfigObject.ResolveModifier modifier = - new SimpleConfigObject.ResolveModifier(context, sourceWithParent); + ResolveModifier modifier = new ResolveModifier(context, sourceWithParent); AbstractConfigValue value = this.modifyMayThrow(modifier); return ResolveResult.make(modifier.context, value).asObjectResult(); } catch (NotPossibleToResolve | RuntimeException var6) { @@ -562,7 +562,7 @@ public boolean containsValue(Object v) { } public Set<Map.Entry<String, ConfigValue>> entrySet() { - HashSet<Map.Entry<String, ConfigValue>> entries = new HashSet<>(); + HashSet<Map.Entry<String, ConfigValue>> entries = new LinkedHashSet<>(); for (Entry<String, AbstractConfigValue> stringAbstractConfigValueEntry : this.value.entrySet()) { @@ -584,7 +584,7 @@ public int size() { } public Collection<ConfigValue> values() { - return new HashSet<>(this.value.values()); + return new ArrayList<>(this.value.values()); } static SimpleConfigObject empty() { diff --git a/seatunnel-config/seatunnel-config-shade/src/test/java/org/apache/seatunnel/config/ConfigTest.java b/seatunnel-config/seatunnel-config-shade/src/test/java/org/apache/seatunnel/config/ConfigTest.java new file mode 100644 index 00000000000..6d8eb73ffae --- /dev/null +++ b/seatunnel-config/seatunnel-config-shade/src/test/java/org/apache/seatunnel/config/ConfigTest.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.config; + +import org.apache.seatunnel.shade.com.typesafe.config.Config; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigRenderOptions; + +import org.apache.seatunnel.config.utils.FileUtils; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.net.URISyntaxException; + +public class ConfigTest { + + @Test + public void testConfigKeyOrder() throws URISyntaxException { + String expected = + "{\"env\":{\"job.mode\":\"BATCH\"},\"source\":[{\"row.num\":100,\"schema\":{\"fields\":{\"name\":\"string\",\"age\":\"int\"}},\"plugin_name\":\"FakeSource\"}],\"sink\":[{\"plugin_name\":\"Console\"}]}"; + + Config config = + ConfigFactory.parseFile( + FileUtils.getFileFromResources("/seatunnel/serialize.conf")); + Assertions.assertEquals(expected, config.root().render(ConfigRenderOptions.concise())); + } +} diff --git a/seatunnel-config/seatunnel-config-sql/src/main/java/org/apache/seatunnel/config/sql/SqlConfigBuilder.java b/seatunnel-config/seatunnel-config-sql/src/main/java/org/apache/seatunnel/config/sql/SqlConfigBuilder.java index f0d68e089b5..00f6a40a436 100644 --- a/seatunnel-config/seatunnel-config-sql/src/main/java/org/apache/seatunnel/config/sql/SqlConfigBuilder.java +++ b/seatunnel-config/seatunnel-config-sql/src/main/java/org/apache/seatunnel/config/sql/SqlConfigBuilder.java @@ -43,7 +43,6 @@ import net.sf.jsqlparser.statement.insert.Insert; import net.sf.jsqlparser.statement.select.PlainSelect; import net.sf.jsqlparser.statement.select.Select; -import net.sf.jsqlparser.statement.select.SelectExpressionItem; import net.sf.jsqlparser.statement.select.SelectItem; import java.nio.file.Files; @@ -360,12 +359,12 @@ private static void parseInsertSql( String sourceTableName; String resultTableName; if (plainSelect.getFromItem() == null) { - List<SelectItem> selectItems = plainSelect.getSelectItems(); + List<SelectItem<?>> selectItems = plainSelect.getSelectItems(); if (selectItems.size() != 1) { throw new ParserException( "Source table must be specified in SQL: " + insertSql); } - SelectExpressionItem selectItem = (SelectExpressionItem) selectItems.get(0); + SelectItem<?> selectItem = selectItems.get(0); Column column = (Column) selectItem.getExpression(); sourceTableName = column.getColumnName(); resultTableName = sourceTableName; diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/schema/AbstractSchemaChangeResolver.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/schema/AbstractSchemaChangeResolver.java index ac86dd0d2bc..eba4d23ce80 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/schema/AbstractSchemaChangeResolver.java +++
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/schema/AbstractSchemaChangeResolver.java @@ -17,6 +17,12 @@ package org.apache.seatunnel.connectors.cdc.base.schema; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableColumnsEvent; +import org.apache.seatunnel.api.table.event.SchemaChangeEvent; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.connectors.cdc.base.config.JdbcSourceConfig; import org.apache.seatunnel.connectors.cdc.base.utils.SourceRecordUtils; @@ -25,17 +31,24 @@ import org.apache.kafka.connect.source.SourceRecord; import com.google.common.collect.Lists; +import io.debezium.relational.Tables; +import io.debezium.relational.ddl.DdlParser; import io.debezium.relational.history.HistoryRecord; +import lombok.Setter; import lombok.extern.slf4j.Slf4j; import java.util.List; +import java.util.Objects; @Slf4j public abstract class AbstractSchemaChangeResolver implements SchemaChangeResolver { protected static final List<String> SUPPORT_DDL = Lists.newArrayList("ALTER TABLE"); - protected JdbcSourceConfig jdbcSourceConfig; + protected final JdbcSourceConfig jdbcSourceConfig; + @Setter protected transient DdlParser ddlParser; + @Setter protected transient Tables tables; + @Setter protected String sourceDialectName; public AbstractSchemaChangeResolver(JdbcSourceConfig jdbcSourceConfig) { this.jdbcSourceConfig = jdbcSourceConfig; @@ -55,4 +68,39 @@ public boolean support(SourceRecord record) { .map(String::toUpperCase) .anyMatch(prefix -> ddl.toUpperCase().contains(prefix)); } + + @Override + public SchemaChangeEvent resolve(SourceRecord record, SeaTunnelDataType dataType) { + TablePath tablePath = SourceRecordUtils.getTablePath(record); + String ddl = SourceRecordUtils.getDdl(record); + if (Objects.isNull(ddlParser)) { + this.ddlParser = createDdlParser(tablePath); + } + if (Objects.isNull(tables)) { + this.tables = new Tables(); + } + ddlParser.setCurrentDatabase(tablePath.getDatabaseName()); + ddlParser.setCurrentSchema(tablePath.getSchemaName()); + // Parse DDL statement using Debezium's Antlr parser + ddlParser.parse(ddl, tables); + List<AlterTableColumnEvent> parsedEvents = getAndClearParsedEvents(); + parsedEvents.forEach(e -> e.setSourceDialectName(getSourceDialectName())); + AlterTableColumnsEvent alterTableColumnsEvent = + new AlterTableColumnsEvent( + TableIdentifier.of( + StringUtils.EMPTY, + tablePath.getDatabaseName(), + tablePath.getSchemaName(), + tablePath.getTableName()), + parsedEvents); + alterTableColumnsEvent.setStatement(ddl); + alterTableColumnsEvent.setSourceDialectName(getSourceDialectName()); + return parsedEvents.isEmpty() ? null : alterTableColumnsEvent; + } + + protected abstract DdlParser createDdlParser(TablePath tablePath); + + protected abstract List<AlterTableColumnEvent> getAndClearParsedEvents(); + + protected abstract String getSourceDialectName(); } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/parser/SeatunnelDDLParser.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/parser/SeatunnelDDLParser.java new file mode 100644 index 00000000000..355df7fddef --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/parser/SeatunnelDDLParser.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.cdc.base.source.parser; + +import org.apache.seatunnel.api.table.catalog.TableIdentifier; + +import org.apache.commons.lang3.StringUtils; + +import io.debezium.relational.Column; +import io.debezium.relational.TableId; + +public interface SeatunnelDDLParser { + + /** + * @param column The column to convert + * @return The converted column in SeaTunnel format which has full type information + */ + default org.apache.seatunnel.api.table.catalog.Column toSeatunnelColumnWithFullTypeInfo( + Column column) { + org.apache.seatunnel.api.table.catalog.Column seatunnelColumn = toSeatunnelColumn(column); + String sourceColumnType = getSourceColumnTypeWithLengthScale(column); + return seatunnelColumn.reSourceType(sourceColumnType); + } + + /** + * @param column The column to convert + * @return The converted column in SeaTunnel format + */ + org.apache.seatunnel.api.table.catalog.Column toSeatunnelColumn(Column column); + + /** + * @param column The column to convert + * @return The type with length and scale + */ + default String getSourceColumnTypeWithLengthScale(Column column) { + StringBuilder sb = new StringBuilder(column.typeName()); + if (column.length() >= 0) { + sb.append('(').append(column.length()); + if (column.scale().isPresent()) { + sb.append(", ").append(column.scale().get()); + } + + sb.append(')'); + } + return sb.toString(); + } + + default TableIdentifier toTableIdentifier(TableId tableId) { + return new TableIdentifier( + StringUtils.EMPTY, tableId.catalog(), tableId.schema(), tableId.table()); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceScanFetcher.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceScanFetcher.java index 7f927af5878..2ed961865e9 100644
--- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceScanFetcher.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceScanFetcher.java @@ -222,12 +222,15 @@ private void checkReadException() { @Override public void close() { try { - if (taskContext != null) { - taskContext.close(); - } + // 1. try to close the split task if (snapshotSplitReadTask != null) { - snapshotSplitReadTask.shutdown(); + try { + snapshotSplitReadTask.shutdown(); + } catch (Exception e) { + log.error("Close snapshot split read task error", e); + } } + // 2. close the fetcher thread if (executorService != null) { executorService.shutdown(); if (!executorService.awaitTermination( @@ -240,6 +243,11 @@ public void close() { } } catch (Exception e) { log.error("Close scan fetcher error", e); + } finally { + // 3. close the task context + if (taskContext != null) { + taskContext.close(); + } } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java index 16e45376566..17536d9de09 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java @@ -187,12 +187,15 @@ private void checkReadException() { @Override public void close() { try { - if (taskContext != null) { - taskContext.close(); - } + // 1. try to close the split task if (streamFetchTask != null) { - streamFetchTask.shutdown(); + try { + streamFetchTask.shutdown(); + } catch (Exception e) { + log.error("Close stream split read task error", e); + } } + // 2. close the fetcher thread if (executorService != null) { executorService.shutdown(); if (!executorService.awaitTermination( @@ -205,6 +208,11 @@ public void close() { } } catch (Exception e) { log.error("Close stream fetcher error", e); + } finally { + // 3. close the task context + if (taskContext != null) { + taskContext.close(); + } } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java index 3245273ace2..f7e9577ddba 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java @@ -36,6 +36,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.Arrays; +import java.util.List; import static io.debezium.connector.AbstractSourceInfo.DATABASE_NAME_KEY; import static io.debezium.connector.AbstractSourceInfo.SCHEMA_NAME_KEY; @@ -46,8 +47,12 @@ public class SourceRecordUtils { private SourceRecordUtils() {} - public static final String SCHEMA_CHANGE_EVENT_KEY_NAME = - "io.debezium.connector.mysql.SchemaChangeKey"; + /** TODO: Support more schema change event key names; currently only MySQL and Oracle are supported. */ + public static final List<String> SUPPORT_SCHEMA_CHANGE_EVENT_KEY_NAME = + Arrays.asList( + "io.debezium.connector.mysql.SchemaChangeKey", + "io.debezium.connector.oracle.SchemaChangeKey"); + public static final String HEARTBEAT_VALUE_SCHEMA_KEY_NAME = "io.debezium.connector.common.Heartbeat"; private static final DocumentReader DOCUMENT_READER = DocumentReader.defaultReader(); @@ -97,7 +102,9 @@ public static Long getFetchTimestamp(SourceRecord record) { public static boolean isSchemaChangeEvent(SourceRecord sourceRecord) { Schema keySchema = sourceRecord.keySchema(); - return keySchema != null && SCHEMA_CHANGE_EVENT_KEY_NAME.equalsIgnoreCase(keySchema.name()); + return keySchema != null + && SUPPORT_SCHEMA_CHANGE_EVENT_KEY_NAME.stream() + .anyMatch(name -> name.equalsIgnoreCase(keySchema.name())); } public static boolean isDataChangeRecord(SourceRecord record) { diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConverters.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConverters.java index 0a2fb09cf8d..89b9c50c30d 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConverters.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConverters.java @@ -85,7 +85,7 @@ public SeaTunnelRow convert(SourceRecord record, Struct struct, Schema schema) if (field == null) { row.setField(i, null); } else { - Object fieldValue = struct.get(fieldName); + Object fieldValue = struct.getWithoutDefault(fieldName); Schema fieldSchema = field.schema(); Object convertedField = SeaTunnelRowDebeziumDeserializationConverters.convertField( @@ -494,11 +494,11 @@ public Object convert(Object dbzObj, Schema schema) throws Exception { SeaTunnelRow row = new SeaTunnelRow(arity); for (int i = 0; i < arity; i++) { String fieldName = fieldNames[i]; - Object fieldValue = struct.get(fieldName); Field field =
schema.field(fieldName); if (field == null) { row.setField(i, null); } else { + Object fieldValue = struct.getWithoutDefault(fieldName); Schema fieldSchema = field.schema(); Object convertedField = SeaTunnelRowDebeziumDeserializationConverters.convertField( diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializeSchema.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializeSchema.java index d09e7b77b5c..948e872d480 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializeSchema.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializeSchema.java @@ -117,7 +117,15 @@ public void deserialize(SourceRecord record, Collector collector) private void deserializeSchemaChangeRecord( SourceRecord record, Collector collector) { - SchemaChangeEvent schemaChangeEvent = schemaChangeResolver.resolve(record, resultTypeInfo); + SchemaChangeEvent schemaChangeEvent = null; + try { + if (schemaChangeResolver != null) { + schemaChangeEvent = schemaChangeResolver.resolve(record, resultTypeInfo); + } + } catch (Exception e) { + log.warn("Failed to resolve schemaChangeEvent, just skip.", e); + return; + } if (schemaChangeEvent == null) { log.warn("Unsupported resolve schemaChangeEvent {}, just skip.", record); return; diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java index ee8d4d7e5df..23906ae6f47 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java @@ -50,6 +50,7 @@ import java.util.concurrent.atomic.AtomicReference; import static io.debezium.config.CommonConnectorConfig.TRANSACTION_TOPIC; +import static io.debezium.connector.AbstractSourceInfo.DEBEZIUM_CONNECTOR_KEY; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -336,7 +337,22 @@ static SourceRecord createSchemaChangeUnknownEvent() { static SourceRecord createSchemaChangeEvent(String topic) { Schema keySchema = - SchemaBuilder.struct().name(SourceRecordUtils.SCHEMA_CHANGE_EVENT_KEY_NAME).build(); + SchemaBuilder.struct().name("io.debezium.connector.mysql.SchemaChangeKey").build(); + Schema valueKeySchema = + SchemaBuilder.struct() + .name("io.debezium.connector.mysql.Source") + .field(DEBEZIUM_CONNECTOR_KEY, Schema.STRING_SCHEMA) + .build(); + Struct valueValues = new Struct(valueKeySchema); + valueValues.put(DEBEZIUM_CONNECTOR_KEY, "mysql"); + + Schema valueSchema = + SchemaBuilder.struct() + .field(Envelope.FieldName.SOURCE, valueKeySchema) + .name("") + .build(); + Struct value = new Struct(valueSchema); + 
value.put(valueSchema.field(Envelope.FieldName.SOURCE), valueValues); SourceRecord record = new SourceRecord( Collections.emptyMap(), @@ -344,8 +360,8 @@ static SourceRecord createSchemaChangeEvent(String topic) { topic, keySchema, null, - null, - null); + valueSchema, + value); Assertions.assertTrue(SourceRecordUtils.isSchemaChangeEvent(record)); return record; } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConvertersTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConvertersTest.java new file mode 100644 index 00000000000..74e832d6e0f --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/debezium/row/SeaTunnelRowDebeziumDeserializationConvertersTest.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.cdc.debezium.row; + +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.connectors.cdc.debezium.DebeziumDeserializationConverterFactory; +import org.apache.seatunnel.connectors.cdc.debezium.MetadataConverter; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.source.SourceRecord; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.HashMap; + +public class SeaTunnelRowDebeziumDeserializationConvertersTest { + + @Test + void testDefaultValueNotUsed() throws Exception { + SeaTunnelRowDebeziumDeserializationConverters converters = + new SeaTunnelRowDebeziumDeserializationConverters( + new SeaTunnelRowType( + new String[] {"id", "name"}, + new SeaTunnelDataType[] { + BasicType.INT_TYPE, BasicType.STRING_TYPE + }), + new MetadataConverter[] {}, + ZoneId.systemDefault(), + DebeziumDeserializationConverterFactory.DEFAULT); + Schema schema = + SchemaBuilder.struct() + .field("id", SchemaBuilder.int32().build()) + .field("name", SchemaBuilder.string().defaultValue("UL")) + .build(); + Struct value = new Struct(schema); + // the value of `name` is null, so do not put value for it + value.put("id", 1); + SourceRecord record = + new SourceRecord( + new HashMap<>(), + new HashMap<>(), + "topicName", + null, + SchemaBuilder.int32().build(), + 1, + schema, + value, + null, + new ArrayList<>()); + + SeaTunnelRow row = converters.convert(record, value, schema); + Assertions.assertEquals(row.getField(0), 1); + Assertions.assertNull(row.getField(1)); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSourceFactory.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSourceFactory.java index ede71f0f792..03b3e1c9ba3 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSourceFactory.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSourceFactory.java @@ -22,6 +22,9 @@ import org.apache.seatunnel.api.source.SourceSplit; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; import org.apache.seatunnel.api.table.connector.TableSource; import org.apache.seatunnel.api.table.factory.Factory; import org.apache.seatunnel.api.table.factory.TableSourceFactory; @@ -31,11 +34,16 @@ import org.apache.seatunnel.connectors.cdc.base.option.SourceOptions; import org.apache.seatunnel.connectors.cdc.base.option.StartupMode; import org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions; +import 
org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.exception.MongodbConnectorException; import com.google.auto.service.AutoService; import java.io.Serializable; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT; @AutoService(Factory.class) public class MongodbIncrementalSourceFactory implements TableSourceFactory { @@ -50,7 +58,8 @@ public OptionRule optionRule() { .required( MongodbSourceOptions.HOSTS, MongodbSourceOptions.DATABASE, - MongodbSourceOptions.COLLECTION) + MongodbSourceOptions.COLLECTION, + TableSchemaOptions.SCHEMA) .optional( MongodbSourceOptions.USERNAME, MongodbSourceOptions.PASSWORD, @@ -79,9 +88,28 @@ public Class<? extends SeaTunnelSource> getSourceClass() { public <T, SplitT extends SourceSplit, StateT extends Serializable> TableSource<T, SplitT, StateT> createSource(TableSourceFactoryContext context) { return () -> { - List<CatalogTable> catalogTables = + List<CatalogTable> configCatalog = CatalogTableUtil.getCatalogTables( context.getOptions(), context.getClassLoader()); + List<String> collections = context.getOptions().get(MongodbSourceOptions.COLLECTION); + if (collections.size() != configCatalog.size()) { + throw new MongodbConnectorException( + ILLEGAL_ARGUMENT, + "The number of collections must be equal to the number of schema tables"); + } + List<CatalogTable> catalogTables = + IntStream.range(0, configCatalog.size()) + .mapToObj( + i -> { + CatalogTable catalogTable = configCatalog.get(i); + String fullName = collections.get(i); + TableIdentifier tableIdentifier = + TableIdentifier.of( + catalogTable.getCatalogName(), + TablePath.of(fullName)); + return CatalogTable.of(tableIdentifier, catalogTable); + }) + .collect(Collectors.toList()); SeaTunnelDataType<SeaTunnelRow> dataType = CatalogTableUtil.convertToMultipleRowType(catalogTables); return (SeaTunnelSource<T, SplitT, StateT>) diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/sender/MongoDBConnectorDeserializationSchema.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/sender/MongoDBConnectorDeserializationSchema.java index 8ce920e8416..4811217cf4c 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/sender/MongoDBConnectorDeserializationSchema.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/sender/MongoDBConnectorDeserializationSchema.java @@ -17,7 +17,10 @@ package org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.sender; +import org.apache.seatunnel.shade.com.google.common.annotations.VisibleForTesting; + import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.type.ArrayType; import org.apache.seatunnel.api.table.type.DecimalType; import org.apache.seatunnel.api.table.type.MapType; @@ -62,10 +65,13 @@ import static org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT; import static org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE; import static org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated.UNSUPPORTED_OPERATION; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.COLL_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.DB_FIELD;
import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.DEFAULT_JSON_WRITER_SETTINGS; import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.DOCUMENT_KEY; import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.ENCODE_VALUE_FIELD; import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.FULL_DOCUMENT; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.NS_FIELD; import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.utils.MongodbRecordUtils.extractBsonDocument; import static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkNotNull; @@ -169,8 +175,16 @@ private SeaTunnelRow extractRowData( } private String extractTableId(SourceRecord record) { - // TODO extract table id from record - return null; + Struct messageStruct = (Struct) record.value(); + Struct nsStruct = (Struct) messageStruct.get(NS_FIELD); + String databaseName = nsStruct.getString(DB_FIELD); + String tableName = nsStruct.getString(COLL_FIELD); + return TablePath.of(databaseName, null, tableName).toString(); + } + + @VisibleForTesting + public String extractTableIdForTest(SourceRecord record) { + return extractTableId(record); } // ------------------------------------------------------------------------------------- diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/test/java/mongodb/sender/MongoDBConnectorDeserializationSchemaTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/test/java/mongodb/sender/MongoDBConnectorDeserializationSchemaTest.java new file mode 100644 index 00000000000..7fba3213f62 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/test/java/mongodb/sender/MongoDBConnectorDeserializationSchemaTest.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package mongodb.sender; + +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.sender.MongoDBConnectorDeserializationSchema; +import org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.utils.MongodbRecordUtils; + +import org.apache.kafka.connect.source.SourceRecord; + +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.Map; + +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.COLL_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.DB_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.DOCUMENT_KEY; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.FULL_DOCUMENT; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.ID_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.NS_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.OPERATION_TYPE; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.OPERATION_TYPE_INSERT; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.SNAPSHOT_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.SNAPSHOT_TRUE; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.SOURCE_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.config.MongodbSourceOptions.TS_MS_FIELD; +import static org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.utils.MongodbRecordUtils.createSourceOffsetMap; + +public class MongoDBConnectorDeserializationSchemaTest { + + @Test + public void extractTableId() { + CatalogTable catalogTable = + CatalogTable.of( + TableIdentifier.of("catalog", "database", "table"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "name1", BasicType.STRING_TYPE, 1L, true, null, "")) + .column( + PhysicalColumn.of( + "name1", BasicType.STRING_TYPE, 1L, true, null, "")) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "comment"); + SeaTunnelDataType dataType = + CatalogTableUtil.convertToMultipleRowType(Collections.singletonList(catalogTable)); + MongoDBConnectorDeserializationSchema schema = + new MongoDBConnectorDeserializationSchema(dataType, dataType); + + // Build SourceRecord + Map partitionMap = + MongodbRecordUtils.createPartitionMap("localhost:27017", "inventory", "products"); + + BsonDocument valueDocument = + new BsonDocument() + .append( + ID_FIELD, + new BsonDocument(ID_FIELD, new BsonInt64(10000000000001L))) + .append(OPERATION_TYPE, new BsonString(OPERATION_TYPE_INSERT)) + .append( + NS_FIELD, + new BsonDocument(DB_FIELD, 
new BsonString("inventory")) + .append(COLL_FIELD, new BsonString("products"))) + .append( + DOCUMENT_KEY, + new BsonDocument(ID_FIELD, new BsonInt64(10000000000001L))) + .append(FULL_DOCUMENT, new BsonDocument()) + .append(TS_MS_FIELD, new BsonInt64(System.currentTimeMillis())) + .append( + SOURCE_FIELD, + new BsonDocument(SNAPSHOT_FIELD, new BsonString(SNAPSHOT_TRUE)) + .append(TS_MS_FIELD, new BsonInt64(0L))); + BsonDocument keyDocument = new BsonDocument(ID_FIELD, valueDocument.get(ID_FIELD)); + SourceRecord sourceRecord = + MongodbRecordUtils.buildSourceRecord( + partitionMap, + createSourceOffsetMap(keyDocument.getDocument(ID_FIELD), true), + "inventory.products", + keyDocument, + valueDocument); + Object tableId = schema.extractTableIdForTest(sourceRecord); + Assertions.assertEquals("inventory.products", tableId); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlSchemaChangeResolver.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlSchemaChangeResolver.java index 3ea4a0dfcea..7420f91eea7 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlSchemaChangeResolver.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlSchemaChangeResolver.java @@ -17,64 +17,37 @@ package org.apache.seatunnel.connectors.seatunnel.cdc.mysql.source; -import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; -import org.apache.seatunnel.api.table.event.AlterTableColumnsEvent; -import org.apache.seatunnel.api.table.event.SchemaChangeEvent; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.connectors.cdc.base.config.JdbcSourceConfig; import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; import org.apache.seatunnel.connectors.cdc.base.schema.AbstractSchemaChangeResolver; -import org.apache.seatunnel.connectors.cdc.base.utils.SourceRecordUtils; import org.apache.seatunnel.connectors.seatunnel.cdc.mysql.source.parser.CustomMySqlAntlrDdlParser; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; -import org.apache.commons.lang3.StringUtils; -import org.apache.kafka.connect.source.SourceRecord; - -import io.debezium.relational.Tables; +import io.debezium.relational.ddl.DdlParser; import java.util.List; -import java.util.Objects; public class MySqlSchemaChangeResolver extends AbstractSchemaChangeResolver { - private transient Tables tables; - private transient CustomMySqlAntlrDdlParser customMySqlAntlrDdlParser; public MySqlSchemaChangeResolver(SourceConfig.Factory sourceConfigFactory) { super(sourceConfigFactory.create(0)); } @Override - public SchemaChangeEvent resolve(SourceRecord record, SeaTunnelDataType dataType) { - TablePath tablePath = SourceRecordUtils.getTablePath(record); - String ddl = SourceRecordUtils.getDdl(record); - if (Objects.isNull(customMySqlAntlrDdlParser)) { - this.customMySqlAntlrDdlParser = - new CustomMySqlAntlrDdlParser( - tablePath, this.jdbcSourceConfig.getDbzConnectorConfig()); - } - if (Objects.isNull(tables)) { - this.tables = new Tables(); - } - 
customMySqlAntlrDdlParser.setCurrentDatabase(tablePath.getDatabaseName()); - customMySqlAntlrDdlParser.setCurrentSchema(tablePath.getSchemaName()); - // Parse DDL statement using Debezium's Antlr parser - customMySqlAntlrDdlParser.parse(ddl, tables); - List parsedEvents = - customMySqlAntlrDdlParser.getAndClearParsedEvents(); - parsedEvents.forEach(e -> e.setSourceDialectName(DatabaseIdentifier.MYSQL)); - AlterTableColumnsEvent alterTableColumnsEvent = - new AlterTableColumnsEvent( - TableIdentifier.of( - StringUtils.EMPTY, - tablePath.getDatabaseName(), - tablePath.getSchemaName(), - tablePath.getTableName()), - parsedEvents); - alterTableColumnsEvent.setStatement(ddl); - alterTableColumnsEvent.setSourceDialectName(DatabaseIdentifier.MYSQL); - return parsedEvents.isEmpty() ? null : alterTableColumnsEvent; + protected DdlParser createDdlParser(TablePath tablePath) { + return new CustomMySqlAntlrDdlParser( + tablePath, this.jdbcSourceConfig.getDbzConnectorConfig()); + } + + @Override + protected List getAndClearParsedEvents() { + return ((CustomMySqlAntlrDdlParser) ddlParser).getAndClearParsedEvents(); + } + + @Override + protected String getSourceDialectName() { + return DatabaseIdentifier.MYSQL; } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/parser/CustomAlterTableParserListener.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/parser/CustomAlterTableParserListener.java index bf36d7831ee..2a1e9b2762f 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/parser/CustomAlterTableParserListener.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/parser/CustomAlterTableParserListener.java @@ -23,6 +23,7 @@ import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; import org.apache.seatunnel.api.table.event.AlterTableDropColumnEvent; import org.apache.seatunnel.api.table.event.AlterTableModifyColumnEvent; +import org.apache.seatunnel.connectors.cdc.base.source.parser.SeatunnelDDLParser; import org.apache.seatunnel.connectors.seatunnel.cdc.mysql.utils.MySqlTypeUtils; import org.apache.commons.lang3.StringUtils; @@ -40,7 +41,8 @@ import java.util.LinkedList; import java.util.List; -public class CustomAlterTableParserListener extends MySqlParserBaseListener { +public class CustomAlterTableParserListener extends MySqlParserBaseListener + implements SeatunnelDDLParser { private static final int STARTING_INDEX = 1; private final MySqlAntlrDdlParser parser; private final List listeners; @@ -95,9 +97,7 @@ public void exitAlterByAddColumn(MySqlParser.AlterByAddColumnContext ctx) { () -> { Column column = columnDefinitionListener.getColumn(); org.apache.seatunnel.api.table.catalog.Column seatunnelColumn = - toSeatunnelColumn(column); - String sourceColumnType = getSourceColumnType(column); - seatunnelColumn = seatunnelColumn.reSourceType(sourceColumnType); + toSeatunnelColumnWithFullTypeInfo(column); if (ctx.FIRST() != null) { AlterTableAddColumnEvent alterTableAddColumnEvent = AlterTableAddColumnEvent.addFirst(tableIdentifier, seatunnelColumn); @@ -153,9 +153,7 @@ public void exitAlterByModifyColumn(MySqlParser.AlterByModifyColumnContext ctx) () -> { Column column = columnDefinitionListener.getColumn(); 
org.apache.seatunnel.api.table.catalog.Column seatunnelColumn = - toSeatunnelColumn(column); - String sourceColumnType = getSourceColumnType(column); - seatunnelColumn = seatunnelColumn.reSourceType(sourceColumnType); + toSeatunnelColumnWithFullTypeInfo(column); if (ctx.FIRST() != null) { AlterTableModifyColumnEvent alterTableModifyColumnEvent = AlterTableModifyColumnEvent.modifyFirst( @@ -197,9 +195,7 @@ public void exitAlterByChangeColumn(MySqlParser.AlterByChangeColumnContext ctx) () -> { Column column = columnDefinitionListener.getColumn(); org.apache.seatunnel.api.table.catalog.Column seatunnelColumn = - toSeatunnelColumn(column); - String sourceColumnType = getSourceColumnType(column); - seatunnelColumn = seatunnelColumn.reSourceType(sourceColumnType); + toSeatunnelColumnWithFullTypeInfo(column); String oldColumnName = column.name(); String newColumnName = parser.parseName(ctx.newColumn); seatunnelColumn = seatunnelColumn.rename(newColumnName); @@ -223,24 +219,8 @@ public void enterAlterByDropColumn(MySqlParser.AlterByDropColumnContext ctx) { super.enterAlterByDropColumn(ctx); } - private org.apache.seatunnel.api.table.catalog.Column toSeatunnelColumn(Column column) { + @Override + public org.apache.seatunnel.api.table.catalog.Column toSeatunnelColumn(Column column) { return MySqlTypeUtils.convertToSeaTunnelColumn(column, dbzConnectorConfig); } - - private TableIdentifier toTableIdentifier(TableId tableId) { - return new TableIdentifier("", tableId.catalog(), tableId.schema(), tableId.table()); - } - - private String getSourceColumnType(Column column) { - StringBuilder sb = new StringBuilder(column.typeName()); - if (column.length() >= 0) { - sb.append('(').append(column.length()); - if (column.scale().isPresent()) { - sb.append(", ").append(column.scale().get()); - } - - sb.append(')'); - } - return sb.toString(); - } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/utils/MySqlTypeUtils.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/utils/MySqlTypeUtils.java index 22f9514c6f3..fd85258eb3c 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/utils/MySqlTypeUtils.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/utils/MySqlTypeUtils.java @@ -30,7 +30,6 @@ import io.debezium.relational.RelationalDatabaseConnectorConfig; import lombok.extern.slf4j.Slf4j; -import java.util.Objects; import java.util.Optional; /** Utilities for converting from MySQL types to SeaTunnel types. 
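The listener hunks above delete getSourceColumnType and toTableIdentifier from the MySQL listener and switch every call site to toSeatunnelColumnWithFullTypeInfo, so the new SeatunnelDDLParser interface presumably carries those pieces as defaults shared by the MySQL and Oracle listeners. A hedged reconstruction: only toSeatunnelColumn is confirmed as the per-dialect hook (both listeners override it); the default bodies are inferred from the code deleted above and from the Oracle listener below calling toTableIdentifier without defining it.

import org.apache.seatunnel.api.table.catalog.Column;
import org.apache.seatunnel.api.table.catalog.TableIdentifier;

import io.debezium.relational.TableId;

public interface SeatunnelDDLParser {

    /** Dialect-specific type mapping; the only hook each listener overrides. */
    Column toSeatunnelColumn(io.debezium.relational.Column column);

    /** Inferred default: type mapping plus the re-attached source column type. */
    default Column toSeatunnelColumnWithFullTypeInfo(io.debezium.relational.Column column) {
        return toSeatunnelColumn(column).reSourceType(getSourceColumnType(column));
    }

    /** Inferred default, matching the helper deleted from the MySQL listener. */
    default String getSourceColumnType(io.debezium.relational.Column column) {
        StringBuilder sb = new StringBuilder(column.typeName());
        if (column.length() >= 0) {
            sb.append('(').append(column.length());
            if (column.scale().isPresent()) {
                sb.append(", ").append(column.scale().get());
            }
            sb.append(')');
        }
        return sb.toString();
    }

    /** Inferred default; the Oracle listener below uses it without defining it. */
    default TableIdentifier toTableIdentifier(TableId tableId) {
        return new TableIdentifier("", tableId.catalog(), tableId.schema(), tableId.table());
    }
}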
*/ @@ -70,7 +69,6 @@ public static org.apache.seatunnel.api.table.catalog.Column convertToSeaTunnelCo Optional defaultValueExpression = column.defaultValueExpression(); Object defaultValue = defaultValueExpression.orElse(null); if (defaultValueExpression.isPresent() - && Objects.nonNull(defaultValue) && !MysqlDefaultValueUtils.isSpecialDefaultValue(defaultValue)) { defaultValue = mySqlDefaultValueConverter @@ -82,11 +80,14 @@ public static org.apache.seatunnel.api.table.catalog.Column convertToSeaTunnelCo .name(column.name()) .columnType(column.typeName()) .dataType(column.typeName()) - .length((long) column.length()) - .precision((long) column.length()) .scale(column.scale().orElse(0)) .nullable(column.isOptional()) .defaultValue(defaultValue); + + if (column.length() >= 0) { + builder.length((long) column.length()).precision((long) column.length()); + } + switch (column.typeName().toUpperCase()) { case MySqlTypeConverter.MYSQL_CHAR: case MySqlTypeConverter.MYSQL_VARCHAR: diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSource.java index a1bbd0cb25c..0a82803e0cb 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSource.java @@ -28,6 +28,7 @@ import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; import org.apache.seatunnel.connectors.cdc.base.dialect.DataSourceDialect; import org.apache.seatunnel.connectors.cdc.base.option.JdbcSourceOptions; +import org.apache.seatunnel.connectors.cdc.base.option.SourceOptions; import org.apache.seatunnel.connectors.cdc.base.option.StartupMode; import org.apache.seatunnel.connectors.cdc.base.option.StopMode; import org.apache.seatunnel.connectors.cdc.base.source.IncrementalSource; @@ -100,20 +101,30 @@ public DebeziumDeserializationSchema createDebeziumDeserializationSchema( ReadonlyConfig config) { // todo:table metadata change reservation Map tableIdStructMap = tableChanges(); + Map debeziumProperties = config.get(SourceOptions.DEBEZIUM_PROPERTIES); if (DeserializeFormat.COMPATIBLE_DEBEZIUM_JSON.equals( config.get(JdbcSourceOptions.FORMAT))) { return (DebeziumDeserializationSchema) - new DebeziumJsonDeserializeSchema( - config.get(JdbcSourceOptions.DEBEZIUM_PROPERTIES)); + new DebeziumJsonDeserializeSchema(debeziumProperties); } SeaTunnelDataType physicalRowType = dataType; String zoneId = config.get(JdbcSourceOptions.SERVER_TIME_ZONE); + + boolean enableDDL = + Boolean.parseBoolean( + debeziumProperties.getOrDefault("include.schema.changes", "false")); + return (DebeziumDeserializationSchema) SeaTunnelRowDebeziumDeserializeSchema.builder() .setPhysicalRowType(physicalRowType) .setResultTypeInfo(physicalRowType) .setServerTimeZone(ZoneId.of(zoneId)) + .setSchemaChangeResolver( + enableDDL + ? 
new OracleSchemaChangeResolver( + createSourceConfigFactory(config)) + : null) .build(); } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleSchemaChangeResolver.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleSchemaChangeResolver.java new file mode 100644 index 00000000000..326209eec44 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleSchemaChangeResolver.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source; + +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; +import org.apache.seatunnel.connectors.cdc.base.config.JdbcSourceConfig; +import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; +import org.apache.seatunnel.connectors.cdc.base.schema.AbstractSchemaChangeResolver; +import org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser.CustomOracleAntlrDdlParser; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; + +import io.debezium.relational.ddl.DdlParser; + +import java.util.List; + +public class OracleSchemaChangeResolver extends AbstractSchemaChangeResolver { + public OracleSchemaChangeResolver(SourceConfig.Factory sourceConfigFactory) { + super(sourceConfigFactory.create(0)); + } + + @Override + protected DdlParser createDdlParser(TablePath tablePath) { + return new CustomOracleAntlrDdlParser(tablePath); + } + + @Override + protected List getAndClearParsedEvents() { + return ((CustomOracleAntlrDdlParser) ddlParser).getAndClearParsedEvents(); + } + + @Override + protected String getSourceDialectName() { + return DatabaseIdentifier.ORACLE; + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/BaseParserListener.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/BaseParserListener.java new file mode 100644 index 00000000000..cba04a72625 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/BaseParserListener.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
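Worth noting from the OracleIncrementalSource hunk above: DDL capture is opt-in, gated on Debezium's include.schema.changes property read from the source's debezium options. A minimal illustration of the gate; only the property name and its "false" default are taken from the hunk, the helper itself is illustrative.

import java.util.Map;

// true  -> an OracleSchemaChangeResolver is wired into the deserialization schema
// false -> setSchemaChangeResolver(null): DDL records are not resolved
static boolean ddlCaptureEnabled(Map<String, String> debeziumProperties) {
    return Boolean.parseBoolean(
            debeziumProperties.getOrDefault("include.schema.changes", "false"));
}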
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser; + +import io.debezium.ddl.parser.oracle.generated.PlSqlParser; +import io.debezium.ddl.parser.oracle.generated.PlSqlParserBaseListener; + +public class BaseParserListener extends PlSqlParserBaseListener { + + /** + * Resolves a table or column name from the provided string. + * + *
<p>
Oracle table and column names are inherently stored in upper-case; however, if the objects + * are created using double-quotes, the case of the object name is retained. Therefore when + * needing to parse a table or column name, this method will adhere to those rules and will + * always return the name in upper-case unless the provided name is double-quoted in which the + * returned value will have the double-quotes removed and case retained. + * + * @param name table or column name + * @return parsed table or column name from the supplied name argument + */ + private static String getTableOrColumnName(String name) { + return removeQuotes(name, true); + } + + /** + * Removes leading and trailing double quote characters from the provided string. + * + * @param text value to have double quotes removed + * @param upperCaseIfNotQuoted control if returned string is upper-cased if not quoted + * @return string that has had quotes removed + */ + @SuppressWarnings("SameParameterValue") + private static String removeQuotes(String text, boolean upperCaseIfNotQuoted) { + if (text != null && text.length() > 2 && text.startsWith("\"") && text.endsWith("\"")) { + return text.substring(1, text.length() - 1); + } + return (upperCaseIfNotQuoted && text != null) ? text.toUpperCase() : text; + } + + String getColumnName(final PlSqlParser.Column_nameContext ctx) { + final String columnName; + if (ctx.id_expression() != null && !ctx.id_expression().isEmpty()) { + columnName = + getTableOrColumnName( + ctx.id_expression(ctx.id_expression().size() - 1).getText()); + } else { + columnName = getTableOrColumnName(ctx.identifier().id_expression().getText()); + } + return columnName; + } + + String getColumnName(final PlSqlParser.Old_column_nameContext ctx) { + return getTableOrColumnName(ctx.getText()); + } + + String getColumnName(final PlSqlParser.New_column_nameContext ctx) { + return getTableOrColumnName(ctx.getText()); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomAlterTableParserListener.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomAlterTableParserListener.java new file mode 100644 index 00000000000..7ebc7b49fff --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomAlterTableParserListener.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
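To make the BaseParserListener quoting rules above concrete, a comment crib of inputs and outputs (identifier values hypothetical):

// getTableOrColumnName / removeQuotes behavior, per the Javadoc above:
//   students       -> STUDENTS      (unquoted: folded to upper-case)
//   "Students"     -> Students      (quoted: quotes stripped, case kept)
//   "MIXED_case"   -> MIXED_case
//   ""             -> ""            (length <= 2, so not treated as quoted)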
+ */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser; + +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.event.AlterTableAddColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableChangeColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableDropColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableModifyColumnEvent; +import org.apache.seatunnel.connectors.cdc.base.source.parser.SeatunnelDDLParser; +import org.apache.seatunnel.connectors.seatunnel.cdc.oracle.utils.OracleTypeUtils; + +import org.apache.commons.lang3.StringUtils; + +import org.antlr.v4.runtime.tree.ParseTreeListener; + +import io.debezium.ddl.parser.oracle.generated.PlSqlParser; +import io.debezium.relational.Column; +import io.debezium.relational.ColumnEditor; +import io.debezium.relational.TableId; +import lombok.extern.slf4j.Slf4j; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +@Slf4j +public class CustomAlterTableParserListener extends BaseParserListener + implements SeatunnelDDLParser { + + private static final int STARTING_INDEX = 0; + private CustomOracleAntlrDdlParser parser; + private final List listeners; + private CustomColumnDefinitionParserListener columnDefinitionListener; + private List columnEditors; + private int parsingColumnIndex = STARTING_INDEX; + + private final LinkedList changes; + private TableIdentifier tableIdentifier; + + public CustomAlterTableParserListener( + CustomOracleAntlrDdlParser parser, + List listeners, + LinkedList changes) { + this.parser = parser; + this.listeners = listeners; + this.changes = changes; + } + + @Override + public void enterAlter_table(PlSqlParser.Alter_tableContext ctx) { + TableId tableId = this.parser.parseQualifiedTableId(); + this.tableIdentifier = toTableIdentifier(tableId); + super.enterAlter_table(ctx); + } + + @Override + public void exitAlter_table(PlSqlParser.Alter_tableContext ctx) { + listeners.remove(columnDefinitionListener); + super.exitAlter_table(ctx); + } + + @Override + public void enterAdd_column_clause(PlSqlParser.Add_column_clauseContext ctx) { + List columns = ctx.column_definition(); + columnEditors = new ArrayList<>(columns.size()); + for (PlSqlParser.Column_definitionContext column : columns) { + String columnName = getColumnName(column.column_name()); + ColumnEditor editor = Column.editor().name(columnName); + columnEditors.add(editor); + } + columnDefinitionListener = new CustomColumnDefinitionParserListener(); + listeners.add(columnDefinitionListener); + super.enterAdd_column_clause(ctx); + } + + @Override + public void exitAdd_column_clause(PlSqlParser.Add_column_clauseContext ctx) { + columnEditors.forEach( + columnEditor -> { + Column column = columnEditor.create(); + org.apache.seatunnel.api.table.catalog.Column seaTunnelColumn = + toSeatunnelColumnWithFullTypeInfo(column); + AlterTableAddColumnEvent addEvent = + AlterTableAddColumnEvent.add(tableIdentifier, seaTunnelColumn); + changes.add(addEvent); + }); + listeners.remove(columnDefinitionListener); + columnDefinitionListener = null; + super.exitAdd_column_clause(ctx); + } + + @Override + public void enterModify_column_clauses(PlSqlParser.Modify_column_clausesContext ctx) { + List columns = ctx.modify_col_properties(); + columnEditors = new ArrayList<>(columns.size()); + for 
(PlSqlParser.Modify_col_propertiesContext column : columns) { + String columnName = getColumnName(column.column_name()); + ColumnEditor editor = Column.editor().name(columnName); + columnEditors.add(editor); + } + columnDefinitionListener = new CustomColumnDefinitionParserListener(); + listeners.add(columnDefinitionListener); + super.enterModify_column_clauses(ctx); + } + + @Override + public void exitModify_column_clauses(PlSqlParser.Modify_column_clausesContext ctx) { + parser.runIfNotNull( + () -> { + Column column = columnDefinitionListener.getColumn(); + org.apache.seatunnel.api.table.catalog.Column seaTunnelColumn = + toSeatunnelColumnWithFullTypeInfo(column); + AlterTableModifyColumnEvent alterTableModifyColumnEvent = + AlterTableModifyColumnEvent.modify(tableIdentifier, seaTunnelColumn); + changes.add(alterTableModifyColumnEvent); + listeners.remove(columnDefinitionListener); + columnDefinitionListener = null; + super.exitModify_column_clauses(ctx); + }, + columnDefinitionListener); + } + + @Override + public void enterModify_col_properties(PlSqlParser.Modify_col_propertiesContext ctx) { + parser.runIfNotNull( + () -> { + // column editor list is not null when a multiple columns are parsed in one + // statement + if (columnEditors.size() > parsingColumnIndex) { + // assign next column editor to parse another column definition + columnDefinitionListener.setColumnEditor( + columnEditors.get(parsingColumnIndex++)); + } + }, + columnEditors); + super.enterModify_col_properties(ctx); + } + + @Override + public void exitModify_col_properties(PlSqlParser.Modify_col_propertiesContext ctx) { + parser.runIfNotNull( + () -> { + if (columnEditors.size() == parsingColumnIndex) { + // all columns parsed + // reset global variables for next parsed statement + parsingColumnIndex = STARTING_INDEX; + } + }, + columnEditors); + super.exitModify_col_properties(ctx); + } + + @Override + public void enterColumn_definition(PlSqlParser.Column_definitionContext ctx) { + parser.runIfNotNull( + () -> { + // column editor list is not null when a multiple columns are parsed in one + // statement + if (columnEditors.size() > parsingColumnIndex) { + // assign next column editor to parse another column definition + columnDefinitionListener.setColumnEditor( + columnEditors.get(parsingColumnIndex++)); + } + }, + columnEditors); + } + + @Override + public void exitColumn_definition(PlSqlParser.Column_definitionContext ctx) { + parser.runIfNotNull( + () -> { + if (columnEditors.size() == parsingColumnIndex) { + // all columns parsed + // reset global variables for next parsed statement + parsingColumnIndex = STARTING_INDEX; + } + }, + columnEditors); + super.exitColumn_definition(ctx); + } + + @Override + public void enterDrop_column_clause(PlSqlParser.Drop_column_clauseContext ctx) { + List columnNameContexts = ctx.column_name(); + columnEditors = new ArrayList<>(columnNameContexts.size()); + for (PlSqlParser.Column_nameContext columnNameContext : columnNameContexts) { + String columnName = getColumnName(columnNameContext); + AlterTableDropColumnEvent alterTableDropColumnEvent = + new AlterTableDropColumnEvent(tableIdentifier, columnName); + changes.add(alterTableDropColumnEvent); + } + super.enterDrop_column_clause(ctx); + } + + @Override + public void enterRename_column_clause(PlSqlParser.Rename_column_clauseContext ctx) { + String oldColumnName = getColumnName(ctx.old_column_name()); + String newColumnName = getColumnName(ctx.new_column_name()); + PhysicalColumn newColumn = 
PhysicalColumn.builder().name(newColumnName).build(); + AlterTableChangeColumnEvent alterTableChangeColumnEvent = + AlterTableChangeColumnEvent.change(tableIdentifier, oldColumnName, newColumn); + if (StringUtils.isNotBlank(newColumnName) + && !StringUtils.equals(oldColumnName, newColumnName)) { + changes.add(alterTableChangeColumnEvent); + } + super.enterRename_column_clause(ctx); + } + + @Override + public org.apache.seatunnel.api.table.catalog.Column toSeatunnelColumn(Column column) { + return OracleTypeUtils.convertToSeaTunnelColumn(column); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomColumnDefinitionParserListener.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomColumnDefinitionParserListener.java new file mode 100644 index 00000000000..12e02dbd281 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomColumnDefinitionParserListener.java @@ -0,0 +1,281 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
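The rename handler above only emits an event for a real rename; its effect, mirroring testParseDDLForRenameColumn later in this patch:

// alter table STUDENTS rename column STUDENT_NAME to STUDENT_NAME1
//   -> AlterTableChangeColumnEvent(oldColumn = STUDENT_NAME,
//                                  column.name = STUDENT_NAME1)
// Renames to a blank name, or where old and new names are equal, are
// dropped by the StringUtils guard and emit no event.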
+ */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser; + +import io.debezium.ddl.parser.oracle.generated.PlSqlParser; +import io.debezium.relational.Column; +import io.debezium.relational.ColumnEditor; +import lombok.Getter; +import lombok.Setter; +import oracle.jdbc.OracleTypes; + +import java.sql.Types; + +@Getter +@Setter +public class CustomColumnDefinitionParserListener extends BaseParserListener { + private ColumnEditor columnEditor; + + public CustomColumnDefinitionParserListener() {} + + @Override + public void enterColumn_definition(PlSqlParser.Column_definitionContext ctx) { + if (columnEditor != null) { + resolveColumnDataType(ctx); + if (ctx.DEFAULT() != null) { + this.columnEditor.defaultValueExpression(ctx.column_default_value().getText()); + } + } + super.enterColumn_definition(ctx); + } + + @Override + public void enterModify_col_properties(PlSqlParser.Modify_col_propertiesContext ctx) { + if (columnEditor != null) { + resolveColumnDataType(ctx); + if (ctx.DEFAULT() != null) { + columnEditor.defaultValueExpression(ctx.column_default_value().getText()); + } + } + super.enterModify_col_properties(ctx); + } + + // todo use dataTypeResolver instead + private void resolveColumnDataType(PlSqlParser.Column_definitionContext ctx) { + columnEditor.name(getColumnName(ctx.column_name())); + + boolean hasNotNullConstraint = + ctx.inline_constraint().stream().anyMatch(c -> c.NOT() != null); + columnEditor.optional(!hasNotNullConstraint); + + if (ctx.datatype() == null) { + if (ctx.type_name() != null + && "MDSYS.SDO_GEOMETRY" + .equalsIgnoreCase(ctx.type_name().getText().replace("\"", ""))) { + columnEditor.jdbcType(Types.STRUCT).type("MDSYS.SDO_GEOMETRY"); + } + } else { + resolveColumnDataType(ctx.datatype()); + } + } + + private void resolveColumnDataType(PlSqlParser.Modify_col_propertiesContext ctx) { + columnEditor.name(getColumnName(ctx.column_name())); + + resolveColumnDataType(ctx.datatype()); + + boolean hasNullConstraint = + ctx.inline_constraint().stream().anyMatch(c -> c.NULL_() != null); + boolean hasNotNullConstraint = + ctx.inline_constraint().stream().anyMatch(c -> c.NOT() != null); + if (hasNotNullConstraint && columnEditor.isOptional()) { + columnEditor.optional(false); + } else if (hasNullConstraint && !columnEditor.isOptional()) { + columnEditor.optional(true); + } + } + + private void resolveColumnDataType(PlSqlParser.DatatypeContext ctx) { + // If the context is null, there is nothing this method can resolve and it is safe to return + if (ctx == null) { + return; + } + + if (ctx.native_datatype_element() != null) { + PlSqlParser.Precision_partContext precisionPart = ctx.precision_part(); + if (ctx.native_datatype_element().INT() != null + || ctx.native_datatype_element().INTEGER() != null + || ctx.native_datatype_element().SMALLINT() != null + || ctx.native_datatype_element().NUMERIC() != null + || ctx.native_datatype_element().DECIMAL() != null) { + // NUMERIC and DECIMAL types have by default zero scale + columnEditor.jdbcType(Types.NUMERIC).type("NUMBER"); + + if (precisionPart != null) { + setPrecision(precisionPart, columnEditor); + setScale(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().DATE() != null) { + // JDBC driver reports type as timestamp but name DATE + columnEditor.jdbcType(Types.TIMESTAMP).type("DATE"); + } else if (ctx.native_datatype_element().TIMESTAMP() != null) { + if (ctx.WITH() != null && ctx.TIME() != null && ctx.ZONE() != null) { + if (ctx.LOCAL() != null) { + columnEditor + 
.jdbcType(OracleTypes.TIMESTAMPLTZ) + .type("TIMESTAMP WITH LOCAL TIME ZONE"); + } else { + columnEditor + .jdbcType(OracleTypes.TIMESTAMPTZ) + .type("TIMESTAMP WITH TIME ZONE"); + } + } else { + columnEditor.jdbcType(Types.TIMESTAMP).type("TIMESTAMP"); + } + + if (precisionPart == null) { + columnEditor.length(6); + } else { + setPrecision(precisionPart, columnEditor); + } + } + // VARCHAR is the same as VARCHAR2 in Oracle + else if (ctx.native_datatype_element().VARCHAR2() != null + || ctx.native_datatype_element().VARCHAR() != null) { + columnEditor.jdbcType(Types.VARCHAR).type("VARCHAR2"); + + if (precisionPart == null) { + columnEditor.length(getVarCharDefaultLength()); + } else { + setPrecision(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().NVARCHAR2() != null) { + columnEditor.jdbcType(Types.NVARCHAR).type("NVARCHAR2"); + + if (precisionPart == null) { + columnEditor.length(getVarCharDefaultLength()); + } else { + setPrecision(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().CHAR() != null) { + columnEditor.jdbcType(Types.CHAR).type("CHAR").length(1); + + if (precisionPart != null) { + setPrecision(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().NCHAR() != null) { + columnEditor.jdbcType(Types.NCHAR).type("NCHAR").length(1); + + if (precisionPart != null) { + setPrecision(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().BINARY_FLOAT() != null) { + columnEditor.jdbcType(OracleTypes.BINARY_FLOAT).type("BINARY_FLOAT"); + } else if (ctx.native_datatype_element().BINARY_DOUBLE() != null) { + columnEditor.jdbcType(OracleTypes.BINARY_DOUBLE).type("BINARY_DOUBLE"); + } + // PRECISION keyword is mandatory + else if (ctx.native_datatype_element().FLOAT() != null + || (ctx.native_datatype_element().DOUBLE() != null + && ctx.native_datatype_element().PRECISION() != null)) { + columnEditor.jdbcType(Types.FLOAT).type("FLOAT"); + + // TODO float's precision is about bits not decimal digits; should be ok for now to + // over-size + if (precisionPart != null) { + setPrecision(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().REAL() != null) { + columnEditor + .jdbcType(Types.FLOAT) + .type("FLOAT") + // TODO float's precision is about bits not decimal digits; should be ok for + // now to over-size + .length(63); + } else if (ctx.native_datatype_element().NUMBER() != null) { + columnEditor.jdbcType(Types.NUMERIC).type("NUMBER"); + + if (precisionPart != null) { + if (precisionPart.ASTERISK() != null) { + // when asterisk is used, explicitly set precision to 38 + columnEditor.length(38); + } else { + setPrecision(precisionPart, columnEditor); + } + setScale(precisionPart, columnEditor); + } + } else if (ctx.native_datatype_element().BLOB() != null) { + columnEditor.jdbcType(Types.BLOB).type("BLOB"); + } else if (ctx.native_datatype_element().CLOB() != null) { + columnEditor.jdbcType(Types.CLOB).type("CLOB"); + } else if (ctx.native_datatype_element().NCLOB() != null) { + columnEditor.jdbcType(Types.NCLOB).type("NCLOB"); + } else if (ctx.native_datatype_element().RAW() != null) { + columnEditor.jdbcType(OracleTypes.RAW).type("RAW"); + + setPrecision(precisionPart, columnEditor); + } else if (ctx.native_datatype_element().SDO_GEOMETRY() != null) { + // Allows the registration of new SDO_GEOMETRY columns via an CREATE/ALTER TABLE + // This is the same registration of the column that is resolved during JDBC metadata + // inspection. 
+ columnEditor.jdbcType(OracleTypes.OTHER).type("SDO_GEOMETRY").length(1); + } else if (ctx.native_datatype_element().ROWID() != null) { + columnEditor.jdbcType(Types.VARCHAR).type("ROWID"); + } else { + columnEditor + .jdbcType(OracleTypes.OTHER) + .type(ctx.native_datatype_element().getText()); + } + } else if (ctx.INTERVAL() != null + && ctx.YEAR() != null + && ctx.TO() != null + && ctx.MONTH() != null) { + columnEditor.jdbcType(OracleTypes.INTERVALYM).type("INTERVAL YEAR TO MONTH").length(2); + if (!ctx.expression().isEmpty()) { + columnEditor.length(Integer.valueOf((ctx.expression(0).getText()))); + } + } else if (ctx.INTERVAL() != null + && ctx.DAY() != null + && ctx.TO() != null + && ctx.SECOND() != null) { + columnEditor + .jdbcType(OracleTypes.INTERVALDS) + .type("INTERVAL DAY TO SECOND") + .length(2) + .scale(6); + for (final PlSqlParser.ExpressionContext e : ctx.expression()) { + if (e.getSourceInterval().startsAfter(ctx.TO().getSourceInterval())) { + columnEditor.scale(Integer.valueOf(e.getText())); + } else { + columnEditor.length(Integer.valueOf(e.getText())); + } + } + if (!ctx.expression().isEmpty()) { + columnEditor.length(Integer.valueOf((ctx.expression(0).getText()))); + } + } else { + columnEditor.jdbcType(OracleTypes.OTHER).type(ctx.getText()); + } + } + + public Column getColumn() { + return columnEditor.create(); + } + + private int getVarCharDefaultLength() { + // TODO replace with value from select name, value from v$parameter where + // name='max_string_size'; + return 4000; + } + + private void setPrecision( + PlSqlParser.Precision_partContext precisionPart, ColumnEditor columnEditor) { + columnEditor.length(Integer.valueOf(precisionPart.numeric(0).getText())); + } + + private void setScale( + PlSqlParser.Precision_partContext precisionPart, ColumnEditor columnEditor) { + if (precisionPart.numeric().size() > 1) { + columnEditor.scale(Integer.valueOf(precisionPart.numeric(1).getText())); + } else if (precisionPart.numeric_negative() != null) { + columnEditor.scale(Integer.valueOf(precisionPart.numeric_negative().getText())); + } else { + columnEditor.scale(0); + } + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomOracleAntlrDdlParser.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomOracleAntlrDdlParser.java new file mode 100644 index 00000000000..1d0384d7ed4 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomOracleAntlrDdlParser.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
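A compact crib for the resolveColumnDataType branches above, derived from this listener's code and the expectations in OracleDdlParserTest below:

// Oracle DDL type          -> ColumnEditor result
// NUMBER(10,2)             -> jdbcType NUMERIC, type NUMBER, length 10, scale 2
// NUMBER(*,0)              -> asterisk precision: length forced to 38, scale 0
// DATE                     -> jdbcType TIMESTAMP, type DATE
// TIMESTAMP                -> length defaults to 6 when no precision part
// VARCHAR2 / VARCHAR       -> type VARCHAR2; bare form gets default length 4000
// CHAR / NCHAR             -> default length 1 unless a precision part is given
// anything unrecognized    -> OracleTypes.OTHER carrying the raw type text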
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser; + +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; + +import com.google.common.collect.Lists; +import io.debezium.antlr.AntlrDdlParserListener; +import io.debezium.connector.oracle.antlr.OracleDdlParser; +import io.debezium.relational.TableId; + +import java.util.LinkedList; +import java.util.List; + +/** A ddl parser that will use custom listener. */ +public class CustomOracleAntlrDdlParser extends OracleDdlParser { + + private final LinkedList parsedEvents; + + private final TablePath tablePath; + + public CustomOracleAntlrDdlParser(TablePath tablePath) { + super(); + this.tablePath = tablePath; + this.parsedEvents = new LinkedList<>(); + } + + public TableId parseQualifiedTableId() { + return new TableId( + tablePath.getDatabaseName(), tablePath.getSchemaName(), tablePath.getTableName()); + } + + @Override + protected AntlrDdlParserListener createParseTreeWalkerListener() { + return new CustomOracleAntlrDdlParserListener(this, parsedEvents); + } + + public List getAndClearParsedEvents() { + List result = Lists.newArrayList(parsedEvents); + parsedEvents.clear(); + return result; + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomOracleAntlrDdlParserListener.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomOracleAntlrDdlParserListener.java new file mode 100644 index 00000000000..1924861c4b0 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/CustomOracleAntlrDdlParserListener.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
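End to end, the parser above is driven exactly the way OracleDdlParserTest (later in this patch) drives it; a minimal usage sketch, with the database/schema/table values taken from that test:

import org.apache.seatunnel.api.table.catalog.TablePath;
import org.apache.seatunnel.api.table.event.AlterTableColumnEvent;

import io.debezium.relational.Tables;

import java.util.List;

static List<AlterTableColumnEvent> parseAlter(String ddl) {
    CustomOracleAntlrDdlParser parser =
            new CustomOracleAntlrDdlParser(
                    TablePath.of("qyws_empi", "QYWS_EMPI", "STUDENTS"));
    parser.setCurrentDatabase("qyws_empi");
    parser.setCurrentSchema("QYWS_EMPI");
    parser.parse(ddl, new Tables());
    return parser.getAndClearParsedEvents();
}
// parseAlter("alter table STUDENTS add (col23 varchar2(20) not null);")
//   -> one AlterTableAddColumnEvent: COL23, VARCHAR2(20), not nullable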
+ */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser; + +import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.tree.ParseTreeListener; + +import io.debezium.antlr.AntlrDdlParserListener; +import io.debezium.antlr.ProxyParseTreeListenerUtil; +import io.debezium.text.ParsingException; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +public class CustomOracleAntlrDdlParserListener extends BaseParserListener + implements AntlrDdlParserListener { + + private final List listeners = new CopyOnWriteArrayList<>(); + private final Collection errors = new ArrayList<>(); + + public CustomOracleAntlrDdlParserListener( + CustomOracleAntlrDdlParser parser, LinkedList parsedEvents) { + // Currently only DDL statements that modify the table structure are supported, so add + // custom listeners to handle these events. + listeners.add(new CustomAlterTableParserListener(parser, listeners, parsedEvents)); + } + + /** + * Returns all caught errors during tree walk. + * + * @return list of Parsing exceptions + */ + @Override + public Collection getErrors() { + return Collections.emptyList(); + } + + @Override + public void enterEveryRule(ParserRuleContext ctx) { + ProxyParseTreeListenerUtil.delegateEnterRule(ctx, listeners, errors); + } + + @Override + public void exitEveryRule(ParserRuleContext ctx) { + ProxyParseTreeListenerUtil.delegateExitRule(ctx, listeners, errors); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/utils/OracleTypeUtils.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/utils/OracleTypeUtils.java index 91547b17b2e..8147a187bb1 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/utils/OracleTypeUtils.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/utils/OracleTypeUtils.java @@ -23,6 +23,8 @@ import io.debezium.relational.Column; +import java.util.Optional; + /** Utilities for converting from oracle types to SeaTunnel types. 
*/ public class OracleTypeUtils { @@ -40,4 +42,33 @@ public static SeaTunnelDataType convertFromColumn(Column column) { OracleTypeConverter.INSTANCE.convert(typeDefine); return seaTunnelColumn.getDataType(); } + + public static org.apache.seatunnel.api.table.catalog.Column convertToSeaTunnelColumn( + io.debezium.relational.Column column) { + + Optional defaultValueExpression = column.defaultValueExpression(); + Object defaultValue = defaultValueExpression.orElse(null); + + BasicTypeDefine.BasicTypeDefineBuilder builder = + BasicTypeDefine.builder() + .name(column.name()) + .columnType(column.typeName()) + .dataType(column.typeName()) + .scale(column.scale().orElse(0)) + .nullable(column.isOptional()) + .defaultValue(defaultValue); + + // The default value of length in column is -1 if it is not set + if (column.length() >= 0) { + builder.length((long) column.length()).precision((long) column.length()); + } + + // TIMESTAMP or TIMESTAMP WITH TIME ZONE + // This is useful for OracleTypeConverter.convert() + if (column.typeName() != null && column.typeName().toUpperCase().startsWith("TIMESTAMP")) { + builder.scale(column.length()); + } + + return new OracleTypeConverter(false).convert(builder.build()); + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/OracleDdlParserTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/OracleDdlParserTest.java new file mode 100644 index 00000000000..54986eae603 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/parser/OracleDdlParserTest.java @@ -0,0 +1,410 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
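The TIMESTAMP special case above deserves a worked example: Debezium reports the fractional-second precision of TIMESTAMP columns through length(), so convertToSeaTunnelColumn copies it into scale before handing the definition to OracleTypeConverter. Input values below are hypothetical; the expected output matches the timestamp(6) case in OracleDdlParserTest.

import java.sql.Types;

import io.debezium.relational.Column;

static org.apache.seatunnel.api.table.catalog.Column timestampExample() {
    Column column =
            Column.editor()
                    .name("CREATED_AT")
                    .type("TIMESTAMP(6)") // typeName starts with "TIMESTAMP"
                    .jdbcType(Types.TIMESTAMP)
                    .length(6)            // Debezium: fractional-second precision
                    .optional(true)
                    .create();
    // -> SeaTunnel TIMESTAMP column: scale 6, nullable, no column length
    return OracleTypeUtils.convertToSeaTunnelColumn(column);
}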
+ */ + +package org.apache.seatunnel.connectors.seatunnel.cdc.oracle.source.parser; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.event.AlterTableAddColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableChangeColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableDropColumnEvent; +import org.apache.seatunnel.api.table.event.AlterTableModifyColumnEvent; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.oracle.OracleTypeConverter; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import io.debezium.relational.Tables; + +import java.util.List; + +public class OracleDdlParserTest { + private static final String PDB_NAME = "qyws_empi"; + private static final String SCHEMA_NAME = "QYWS_EMPI"; + private static final String TABLE_NAME = "STUDENTS"; + private static CustomOracleAntlrDdlParser parser; + + @BeforeAll + public static void setUp() { + parser = new CustomOracleAntlrDdlParser(TablePath.of(PDB_NAME, SCHEMA_NAME, TABLE_NAME)); + parser.setCurrentDatabase(PDB_NAME); + parser.setCurrentSchema(SCHEMA_NAME); + } + + @Test + public void testParseDDLForAddColumn() { + String ddl = + "alter table \"" + + SCHEMA_NAME + + "\".\"" + + TABLE_NAME + + "\" add (" + + "\"col21\" varchar2(20), col22 number(19));"; + parser.parse(ddl, new Tables()); + List addEvent1 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(2, addEvent1.size()); + testColumn(addEvent1.get(0), "col21", "varchar2(20)", "STRING", 20 * 4L, null, true, null); + testColumn( + addEvent1.get(1), + "col22".toUpperCase(), + "number(19, 0)", + "Decimal(19, 0)", + 19L, + null, + true, + null); + + ddl = "alter table " + TABLE_NAME + " add (col23 varchar2(20) not null);"; + parser.parse(ddl, new Tables()); + List addEvent2 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, addEvent2.size()); + testColumn( + addEvent2.get(0), + "col23".toUpperCase(), + "varchar2(20)", + "STRING", + 20 * 4L, + null, + false, + null); + + ddl = + "alter table " + + TABLE_NAME + + " add (" + + "col1 numeric(4,2),\n" + + "col2 varchar2(255) default 'debezium' not null ,\n" + + "col3 varchar2(255) default sys_context('userenv','host') not null ,\n" + + "col4 nvarchar2(255) not null,\n" + + "col5 char(4),\n" + + "col6 nchar(4),\n" + + "col7 float default '3.0' not null,\n" + + "col8 date,\n" + + "col9 timestamp(6) default sysdate,\n" + + "col10 blob,\n" + + "col11 clob,\n" + + "col12 number(1,0),\n" + + "col13 timestamp with time zone not null,\n" + + "col14 number default (sysdate-to_date('1970-01-01 08:00:00', 'yyyy-mm-dd hh24:mi:ss'))*86400000,\n" + + "col15 timestamp(9) default to_timestamp('20190101 00:00:00.000000','yyyymmdd hh24:mi:ss.ff6') not null,\n" + + "col16 date default sysdate not null);"; + parser.parse(ddl, new Tables()); + List addEvent3 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(16, addEvent3.size()); + // Special default values are handled for reference: + // io.debezium.connector.oracle.OracleDefaultValueConverter.castTemporalFunctionCall + testColumn( + addEvent3.get(0), + "col1".toUpperCase(), + "number(4, 2)", + "Decimal(4, 2)", + 4L, + 2, + true, + null); + testColumn( + addEvent3.get(1), + "col2".toUpperCase(), + "varchar2(255)", + "STRING", + 255 * 4L, + null, + false, + "'debezium'"); + testColumn( + 
addEvent3.get(2), + "col3".toUpperCase(), + "varchar2(255)", + "STRING", + 255 * 4L, + null, + false, + "sys_context('userenv','host')"); + testColumn( + addEvent3.get(3), + "col4".toUpperCase(), + "nvarchar2(255)", + "STRING", + 255 * 2L, + null, + false, + null); + testColumn( + addEvent3.get(4), + "col5".toUpperCase(), + "char(4)", + "STRING", + 4 * 4L, + null, + true, + null); + testColumn( + addEvent3.get(5), + "col6".toUpperCase(), + "nchar(4)", + "STRING", + 4 * 2L, + null, + true, + null); + testColumn( + addEvent3.get(6), + "col7".toUpperCase(), + "float", + "Decimal(38, 18)", + 38L, + 18, + false, + "'3.0'"); + testColumn( + addEvent3.get(7), + "col8".toUpperCase(), + "date", + "TIMESTAMP", + null, + null, + true, + null); + testColumn( + addEvent3.get(8), + "col9".toUpperCase(), + "timestamp(6)", + "TIMESTAMP", + null, + 6, + true, + "sysdate"); + testColumn( + addEvent3.get(9), + "col10".toUpperCase(), + "blob", + "BYTES", + OracleTypeConverter.BYTES_4GB - 1, + null, + true, + null); + testColumn( + addEvent3.get(10), + "col11".toUpperCase(), + "clob", + "STRING", + OracleTypeConverter.BYTES_4GB - 1, + null, + true, + null); + testColumn( + addEvent3.get(11), + "col12".toUpperCase(), + "number(1, 0)", + "Decimal(1, 0)", + 1L, + null, + true, + null); + testColumn( + addEvent3.get(12), + "col13".toUpperCase(), + "timestamp with time zone(6)", + "TIMESTAMP", + null, + 6, + false, + null); + testColumn( + addEvent3.get(13), + "col14".toUpperCase(), + "number", + "Decimal(38, 0)", + 38L, + null, + true, + "(sysdate-to_date('1970-01-01 08:00:00','yyyy-mm-dd hh24:mi:ss'))*86400000"); + testColumn( + addEvent3.get(14), + "col15".toUpperCase(), + "timestamp(9)", + "TIMESTAMP", + null, + 9, + false, + "to_timestamp('20190101 00:00:00.000000','yyyymmdd hh24:mi:ss.ff6')"); + testColumn( + addEvent3.get(15), + "col16".toUpperCase(), + "date", + "TIMESTAMP", + null, + null, + false, + "sysdate"); + + ddl = + "ALTER TABLE \"" + + SCHEMA_NAME + + "\".\"" + + TABLE_NAME + + "\" ADD \"ADD_COL2\" TIMESTAMP(6) DEFAULT current_timestamp(6) NOT NULL "; + parser.parse(ddl, new Tables()); + List addEvent4 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, addEvent4.size()); + testColumn( + addEvent4.get(0), + "ADD_COL2", + "timestamp(6)", + "TIMESTAMP", + null, + 6, + false, + "current_timestamp(6)"); + } + + @Test + public void testParseDDLForDropColumn() { + String ddl = "ALTER TABLE \"" + SCHEMA_NAME + "\".\"" + TABLE_NAME + "\" DROP (T_VARCHAR2)"; + parser.parse(ddl, new Tables()); + List dropEvent1 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, dropEvent1.size()); + Assertions.assertEquals( + "T_VARCHAR2", ((AlterTableDropColumnEvent) dropEvent1.get(0)).getColumn()); + + ddl = "alter table " + TABLE_NAME + " drop (col22, col23);"; + parser.parse(ddl, new Tables()); + List dropEvent2 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(2, dropEvent2.size()); + Assertions.assertEquals( + "col22".toUpperCase(), ((AlterTableDropColumnEvent) dropEvent2.get(0)).getColumn()); + Assertions.assertEquals( + "col23".toUpperCase(), ((AlterTableDropColumnEvent) dropEvent2.get(1)).getColumn()); + + ddl = "alter table " + TABLE_NAME + " drop (\"col22\");"; + parser.parse(ddl, new Tables()); + List dropEvent3 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, dropEvent3.size()); + Assertions.assertEquals( + "col22", ((AlterTableDropColumnEvent) dropEvent3.get(0)).getColumn()); + } + + @Test + public void testParseDDLForRenameColumn() { + String ddl = "alter 
table " + TABLE_NAME + " rename column STUDENT_NAME to STUDENT_NAME1"; + parser.parse(ddl, new Tables()); + List renameEvent1 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, renameEvent1.size()); + Assertions.assertEquals( + "STUDENT_NAME", ((AlterTableChangeColumnEvent) renameEvent1.get(0)).getOldColumn()); + Assertions.assertEquals( + "STUDENT_NAME1", + ((AlterTableChangeColumnEvent) renameEvent1.get(0)).getColumn().getName()); + + ddl = + "alter table \"" + + TABLE_NAME + + "\" rename column STUDENT_ID to STUDENT_ID1;\n" + + "alter table \"" + + TABLE_NAME + + "\" rename column CLASS_ID to CLASS_ID1\n"; + + parser.parse(ddl, new Tables()); + List renameEvent2 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(2, renameEvent2.size()); + Assertions.assertEquals( + "STUDENT_ID", ((AlterTableChangeColumnEvent) renameEvent2.get(0)).getOldColumn()); + Assertions.assertEquals( + "STUDENT_ID1", + ((AlterTableChangeColumnEvent) renameEvent2.get(0)).getColumn().getName()); + Assertions.assertEquals( + "CLASS_ID", ((AlterTableChangeColumnEvent) renameEvent2.get(1)).getOldColumn()); + Assertions.assertEquals( + "CLASS_ID1", + ((AlterTableChangeColumnEvent) renameEvent2.get(1)).getColumn().getName()); + } + + @Test + public void testParseDDLForModifyColumn() { + String ddl = "ALTER TABLE " + TABLE_NAME + " MODIFY COL1 varchar2(50) not null;"; + parser.parse(ddl, new Tables()); + List modifyEvent1 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, modifyEvent1.size()); + testColumn( + modifyEvent1.get(0), "COL1", "varchar2(50)", "STRING", 50 * 4L, null, false, null); + + ddl = "alter table " + TABLE_NAME + " modify sex char(2) default 'M' not null ;"; + parser.parse(ddl, new Tables()); + List modifyEvent2 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, modifyEvent2.size()); + testColumn( + modifyEvent2.get(0), + "sex".toUpperCase(), + "char(2)", + "STRING", + 2 * 4L, + null, + false, + "'M'"); + ddl = + "ALTER TABLE \"" + + SCHEMA_NAME + + "\".\"" + + TABLE_NAME + + "\" MODIFY (ID NUMBER(*,0) NULL);"; + parser.parse(ddl, new Tables()); + List modifyEvent3 = parser.getAndClearParsedEvents(); + Assertions.assertEquals(1, modifyEvent3.size()); + testColumn( + modifyEvent3.get(0), + "ID", + "number(38, 0)", + "Decimal(38, 0)", + 38L, + null, + true, + null); + } + + private void testColumn( + AlterTableColumnEvent alterTableColumnEvent, + String columnName, + String sourceType, + String dataType, + Long columnLength, + Integer scale, + boolean isNullable, + Object defaultValue) { + Column column; + switch (alterTableColumnEvent.getEventType()) { + case SCHEMA_CHANGE_ADD_COLUMN: + column = ((AlterTableAddColumnEvent) alterTableColumnEvent).getColumn(); + break; + case SCHEMA_CHANGE_MODIFY_COLUMN: + column = ((AlterTableModifyColumnEvent) alterTableColumnEvent).getColumn(); + break; + default: + throw new UnsupportedOperationException( + "Unsupported method named getColumn() for the AlterTableColumnEvent: " + + alterTableColumnEvent.getEventType().name()); + } + Assertions.assertEquals(columnName, column.getName()); + Assertions.assertEquals(sourceType.toUpperCase(), column.getSourceType()); + Assertions.assertEquals(dataType, column.getDataType().toString()); + Assertions.assertEquals(columnLength, column.getColumnLength()); + Assertions.assertEquals(scale, column.getScale()); + Assertions.assertEquals(isNullable, column.isNullable()); + Assertions.assertEquals(defaultValue, column.getDefaultValue()); + } +} diff --git 
a/seatunnel-connectors-v2/connector-clickhouse/pom.xml b/seatunnel-connectors-v2/connector-clickhouse/pom.xml index 2a4b77a3f45..22d2565a63a 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/pom.xml +++ b/seatunnel-connectors-v2/connector-clickhouse/pom.xml @@ -53,7 +53,7 @@ commons-io commons-io - 2.11.0 + 2.14.0 diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/ClickhouseSinkFactory.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/ClickhouseSinkFactory.java deleted file mode 100644 index 17ffdd2d406..00000000000 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/ClickhouseSinkFactory.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink; - -import org.apache.seatunnel.api.configuration.util.OptionRule; -import org.apache.seatunnel.api.table.factory.Factory; -import org.apache.seatunnel.api.table.factory.TableSinkFactory; - -import com.google.auto.service.AutoService; - -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.BULK_SIZE; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PASSWORD; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PRIMARY_KEY; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SHARDING_KEY; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SPLIT_MODE; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SUPPORT_UPSERT; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE; -import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.USERNAME; - -@AutoService(Factory.class) -public class ClickhouseSinkFactory implements TableSinkFactory { - @Override - public String factoryIdentifier() { - return "Clickhouse"; - } - - @Override - public OptionRule optionRule() { - return OptionRule.builder() - .required(HOST, DATABASE, TABLE) - .optional( 
- CLICKHOUSE_CONFIG, - BULK_SIZE, - SPLIT_MODE, - SHARDING_KEY, - PRIMARY_KEY, - SUPPORT_UPSERT, - ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE) - .bundled(USERNAME, PASSWORD) - .build(); - } -} diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseBatchStatement.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseBatchStatement.java index 52397229dbc..04ee5755e59 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseBatchStatement.java +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseBatchStatement.java @@ -18,7 +18,7 @@ package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client; import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.executor.JdbcBatchStatementExecutor; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.tool.IntHolder; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.IntHolder; import com.clickhouse.jdbc.internal.ClickHouseConnectionImpl; diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSink.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSink.java index d2de6fd182b..22f18694e23 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSink.java +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSink.java @@ -17,210 +17,35 @@ package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client; -import org.apache.seatunnel.shade.com.typesafe.config.Config; -import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; - -import org.apache.seatunnel.api.common.PrepareFailException; -import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.serialization.DefaultSerializer; import org.apache.seatunnel.api.serialization.Serializer; import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SinkWriter; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.common.config.CheckConfigUtil; -import org.apache.seatunnel.common.config.CheckResult; -import org.apache.seatunnel.common.constants.PluginType; -import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ReaderOption; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.ShardMetadata; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.file.ClickhouseTable; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKAggCommitInfo; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKCommitInfo; import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSinkState;
-import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil;
-
-import com.clickhouse.client.ClickHouseNode;
-import com.google.auto.service.AutoService;
-import com.google.common.collect.ImmutableMap;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Map;
-import java.util.Objects;
 import java.util.Optional;
-import java.util.Properties;
-
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.BULK_SIZE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PASSWORD;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PRIMARY_KEY;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SERVER_TIME_ZONE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SHARDING_KEY;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SPLIT_MODE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SUPPORT_UPSERT;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.USERNAME;
 
-@AutoService(SeaTunnelSink.class)
 public class ClickhouseSink
         implements SeaTunnelSink<SeaTunnelRow, ClickhouseSinkState, CKCommitInfo, CKAggCommitInfo> {
 
     private ReaderOption option;
+    private CatalogTable catalogTable;
 
-    @Override
-    public String getPluginName() {
-        return "Clickhouse";
+    public ClickhouseSink(ReaderOption option, CatalogTable catalogTable) {
+        this.option = option;
+        this.catalogTable = catalogTable;
     }
 
     @Override
-    public void prepare(Config config) throws PrepareFailException {
-        CheckResult result =
-                CheckConfigUtil.checkAllExists(config, HOST.key(), DATABASE.key(), TABLE.key());
-
-        boolean isCredential = config.hasPath(USERNAME.key()) || config.hasPath(PASSWORD.key());
-
-        if (isCredential) {
-            result = CheckConfigUtil.checkAllExists(config, USERNAME.key(), PASSWORD.key());
-        }
-
-        if (!result.isSuccess()) {
-            throw new ClickhouseConnectorException(
-                    SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED,
-                    String.format(
-                            "PluginName: %s, PluginType: %s, Message: %s",
-                            getPluginName(), PluginType.SINK, result.getMsg()));
-        }
-        Map<String, Object> defaultConfig =
-                ImmutableMap.<String, Object>builder()
-                        .put(BULK_SIZE.key(), BULK_SIZE.defaultValue())
-                        .put(SPLIT_MODE.key(), SPLIT_MODE.defaultValue())
-                        .put(SERVER_TIME_ZONE.key(), SERVER_TIME_ZONE.defaultValue())
-                        .build();
-
-        config = config.withFallback(ConfigFactory.parseMap(defaultConfig));
-
-        List<ClickHouseNode> nodes;
-        if (!isCredential) {
-            nodes =
-                    ClickhouseUtil.createNodes(
-                            config.getString(HOST.key()),
-                            config.getString(DATABASE.key()),
-                            config.getString(SERVER_TIME_ZONE.key()),
-                            null,
-                            null,
-                            null);
-        } else {
-            nodes =
-                    ClickhouseUtil.createNodes(
-                            config.getString(HOST.key()),
-                            config.getString(DATABASE.key()),
-                            config.getString(SERVER_TIME_ZONE.key()),
-                            config.getString(USERNAME.key()),
-                            
config.getString(PASSWORD.key()),
-                            null);
-        }
-
-        Properties clickhouseProperties = new Properties();
-        if (CheckConfigUtil.isValidParam(config, CLICKHOUSE_CONFIG.key())) {
-            config.getObject(CLICKHOUSE_CONFIG.key())
-                    .forEach(
-                            (key, value) ->
-                                    clickhouseProperties.put(
-                                            key, String.valueOf(value.unwrapped())));
-        }
-
-        if (isCredential) {
-            clickhouseProperties.put("user", config.getString(USERNAME.key()));
-            clickhouseProperties.put("password", config.getString(PASSWORD.key()));
-        }
-
-        ClickhouseProxy proxy = new ClickhouseProxy(nodes.get(0));
-        Map<String, String> tableSchema =
-                proxy.getClickhouseTableSchema(config.getString(TABLE.key()));
-        String shardKey = null;
-        String shardKeyType = null;
-        ClickhouseTable table =
-                proxy.getClickhouseTable(
-                        config.getString(DATABASE.key()), config.getString(TABLE.key()));
-        if (config.getBoolean(SPLIT_MODE.key())) {
-            if (!"Distributed".equals(table.getEngine())) {
-                throw new ClickhouseConnectorException(
-                        CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT,
-                        "split mode only support table which engine is "
-                                + "'Distributed' engine at now");
-            }
-            if (config.hasPath(SHARDING_KEY.key())) {
-                shardKey = config.getString(SHARDING_KEY.key());
-                shardKeyType = tableSchema.get(shardKey);
-            }
-        }
-        ShardMetadata metadata;
-
-        if (isCredential) {
-            metadata =
-                    new ShardMetadata(
-                            shardKey,
-                            shardKeyType,
-                            table.getSortingKey(),
-                            config.getString(DATABASE.key()),
-                            config.getString(TABLE.key()),
-                            table.getEngine(),
-                            config.getBoolean(SPLIT_MODE.key()),
-                            new Shard(1, 1, nodes.get(0)),
-                            config.getString(USERNAME.key()),
-                            config.getString(PASSWORD.key()));
-        } else {
-            metadata =
-                    new ShardMetadata(
-                            shardKey,
-                            shardKeyType,
-                            table.getSortingKey(),
-                            config.getString(DATABASE.key()),
-                            config.getString(TABLE.key()),
-                            table.getEngine(),
-                            config.getBoolean(SPLIT_MODE.key()),
-                            new Shard(1, 1, nodes.get(0)));
-        }
-
-        proxy.close();
-
-        String[] primaryKeys = null;
-        if (config.hasPath(PRIMARY_KEY.key())) {
-            String primaryKey = config.getString(PRIMARY_KEY.key());
-            if (shardKey != null && !Objects.equals(primaryKey, shardKey)) {
-                throw new ClickhouseConnectorException(
-                        CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT,
-                        "sharding_key and primary_key must be consistent to ensure correct processing of cdc events");
-            }
-            primaryKeys = new String[] {primaryKey};
-        }
-        boolean supportUpsert = SUPPORT_UPSERT.defaultValue();
-        if (config.hasPath(SUPPORT_UPSERT.key())) {
-            supportUpsert = config.getBoolean(SUPPORT_UPSERT.key());
-        }
-        boolean allowExperimentalLightweightDelete =
-                ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE.defaultValue();
-        if (config.hasPath(ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE.key())) {
-            allowExperimentalLightweightDelete =
-                    config.getBoolean(ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE.key());
-        }
-        this.option =
-                ReaderOption.builder()
-                        .shardMetadata(metadata)
-                        .properties(clickhouseProperties)
-                        .tableEngine(table.getEngine())
-                        .tableSchema(tableSchema)
-                        .bulkSize(config.getInt(BULK_SIZE.key()))
-                        .primaryKeys(primaryKeys)
-                        .supportUpsert(supportUpsert)
-                        .allowExperimentalLightweightDelete(allowExperimentalLightweightDelete)
-                        .build();
+    public String getPluginName() {
+        return "Clickhouse";
     }
 
     @Override
@@ -241,7 +66,7 @@ public Optional<Serializer<ClickhouseSinkState>> getWriterStateSerializer() {
     }
 
     @Override
-    public void setTypeInfo(SeaTunnelRowType seaTunnelRowType) {
-        this.option.setSeaTunnelRowType(seaTunnelRowType);
+    public Optional<CatalogTable> getWriteCatalogTable() {
+        return Optional.of(catalogTable);
     }
 }
diff --git 
a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkFactory.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkFactory.java new file mode 100644 index 00000000000..720efacc321 --- /dev/null +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkFactory.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.connector.TableSink; +import org.apache.seatunnel.api.table.factory.Factory; +import org.apache.seatunnel.api.table.factory.TableSinkFactory; +import org.apache.seatunnel.api.table.factory.TableSinkFactoryContext; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ReaderOption; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.ShardMetadata; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.file.ClickhouseTable; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKAggCommitInfo; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKCommitInfo; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSinkState; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseProxy; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil; + +import com.clickhouse.client.ClickHouseNode; +import com.google.auto.service.AutoService; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; + +import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE; +import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.BULK_SIZE; +import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG; +import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE; +import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PASSWORD;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PRIMARY_KEY;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SHARDING_KEY;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SPLIT_MODE;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SUPPORT_UPSERT;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE;
+import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.USERNAME;
+
+@AutoService(Factory.class)
+public class ClickhouseSinkFactory
+        implements TableSinkFactory<SeaTunnelRow, ClickhouseSinkState, CKCommitInfo, CKAggCommitInfo> {
+    @Override
+    public String factoryIdentifier() {
+        return "Clickhouse";
+    }
+
+    @Override
+    public TableSink<SeaTunnelRow, ClickhouseSinkState, CKCommitInfo, CKAggCommitInfo> createSink(
+            TableSinkFactoryContext context) {
+        ReadonlyConfig readonlyConfig = context.getOptions();
+        CatalogTable catalogTable = context.getCatalogTable();
+        List<ClickHouseNode> nodes = ClickhouseUtil.createNodes(readonlyConfig);
+        Properties clickhouseProperties = new Properties();
+        readonlyConfig
+                .get(CLICKHOUSE_CONFIG)
+                .forEach((key, value) -> clickhouseProperties.put(key, String.valueOf(value)));
+
+        clickhouseProperties.put("user", readonlyConfig.get(USERNAME));
+        clickhouseProperties.put("password", readonlyConfig.get(PASSWORD));
+        ClickhouseProxy proxy = new ClickhouseProxy(nodes.get(0));
+        try {
+            Map<String, String> tableSchema =
+                    proxy.getClickhouseTableSchema(readonlyConfig.get(TABLE));
+            String shardKey = null;
+            String shardKeyType = null;
+            ClickhouseTable table =
+                    proxy.getClickhouseTable(
+                            readonlyConfig.get(DATABASE), readonlyConfig.get(TABLE));
+            if (readonlyConfig.get(SPLIT_MODE)) {
+                if (!"Distributed".equals(table.getEngine())) {
+                    throw new ClickhouseConnectorException(
+                            CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT,
+                            "split mode currently only supports tables whose engine is "
+                                    + "'Distributed'");
+                }
+                if (readonlyConfig.getOptional(SHARDING_KEY).isPresent()) {
+                    shardKey = readonlyConfig.get(SHARDING_KEY);
+                    shardKeyType = tableSchema.get(shardKey);
+                }
+            }
+            ShardMetadata metadata =
+                    new ShardMetadata(
+                            shardKey,
+                            shardKeyType,
+                            table.getSortingKey(),
+                            readonlyConfig.get(DATABASE),
+                            readonlyConfig.get(TABLE),
+                            table.getEngine(),
+                            readonlyConfig.get(SPLIT_MODE),
+                            new Shard(1, 1, nodes.get(0)),
+                            readonlyConfig.get(USERNAME),
+                            readonlyConfig.get(PASSWORD));
+            proxy.close();
+            String[] primaryKeys = null;
+            if (readonlyConfig.getOptional(PRIMARY_KEY).isPresent()) {
+                String primaryKey = readonlyConfig.get(PRIMARY_KEY);
+                if (shardKey != null && !Objects.equals(primaryKey, shardKey)) {
+                    throw new ClickhouseConnectorException(
+                            CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT,
+                            "sharding_key and primary_key must be consistent to ensure correct processing of cdc events");
+                }
+                primaryKeys = new String[] {primaryKey};
+            }
+            boolean supportUpsert = readonlyConfig.get(SUPPORT_UPSERT);
+            boolean allowExperimentalLightweightDelete =
+                    readonlyConfig.get(ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE);
+
+            ReaderOption option =
+                    ReaderOption.builder()
+                            .shardMetadata(metadata)
+                            .properties(clickhouseProperties)
+                            .seaTunnelRowType(catalogTable.getSeaTunnelRowType())
+                            .tableEngine(table.getEngine())
+                            .tableSchema(tableSchema)
+                            .bulkSize(readonlyConfig.get(BULK_SIZE))
+                            .primaryKeys(primaryKeys)
+                            .supportUpsert(supportUpsert)
+                            .allowExperimentalLightweightDelete(allowExperimentalLightweightDelete)
+                            .build();
+            return () -> new ClickhouseSink(option, catalogTable);
+        } finally {
+            proxy.close();
+        }
+    }
+
+    @Override
+    public OptionRule optionRule() {
+        return OptionRule.builder()
+                .required(HOST, DATABASE, TABLE)
+                .optional(
+                        CLICKHOUSE_CONFIG,
+                        BULK_SIZE,
+                        SPLIT_MODE,
+                        SHARDING_KEY,
+                        PRIMARY_KEY,
+                        SUPPORT_UPSERT,
+                        ALLOW_EXPERIMENTAL_LIGHTWEIGHT_DELETE)
+                .bundled(USERNAME, PASSWORD)
+                .build();
+    }
+}
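Note on the factory above: createSink ends with return () -> new ClickhouseSink(option, catalogTable), which compiles because TableSink exposes a single sink-producing method, so sink construction is deferred until the engine actually asks for it. A minimal self-contained sketch of that deferred-construction pattern follows; the types are hypothetical stand-ins, not the SeaTunnel API.

import java.util.Properties;

public class DeferredSinkSketch {
    // Stand-in for TableSink: one abstract method, so a lambda suffices.
    @FunctionalInterface
    interface SinkSupplier<S> {
        S createSink();
    }

    static final class DemoSink {
        final Properties resolvedOptions;

        DemoSink(Properties resolvedOptions) {
            this.resolvedOptions = resolvedOptions;
        }
    }

    public static void main(String[] args) {
        // All validation and metadata lookup happens once, on the factory side.
        Properties resolved = new Properties();
        resolved.setProperty("bulk_size", "20000");

        // The sink itself is only built when createSink() is finally invoked.
        SinkSupplier<DemoSink> tableSink = () -> new DemoSink(resolved);
        System.out.println("options: " + tableSink.createSink().resolvedOptions);
    }
}

Keeping resolution in the factory means a bad configuration fails before any writer is created, which is the practical payoff of the refactor in this file.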
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java
index b5f1505d112..6b7f652aba4 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java
@@ -28,7 +28,8 @@
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.executor.JdbcBatchStatementExecutorBuilder;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKCommitInfo;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSinkState;
-import org.apache.seatunnel.connectors.seatunnel.clickhouse.tool.IntHolder;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseProxy;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.IntHolder;
 
 import org.apache.commons.lang3.StringUtils;
 
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ShardRouter.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ShardRouter.java
index 140e40b3b13..03f6efec311 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ShardRouter.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ShardRouter.java
@@ -21,7 +21,8 @@
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.ShardMetadata;
-import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.DistributedEngine;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseProxy;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.DistributedEngine;
 
 import org.apache.commons.lang3.StringUtils;
 
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSink.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSink.java
index bb445d42820..4a0c80e02c5 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSink.java
+++ 
b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSink.java @@ -38,10 +38,10 @@ import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException; import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard; import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.ShardMetadata; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.ClickhouseProxy; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKFileAggCommitInfo; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKFileCommitInfo; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSinkState; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseProxy; import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil; import com.clickhouse.client.ClickHouseNode; diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkAggCommitter.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkAggCommitter.java index 53e5fcb5ab0..5d69191cac0 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkAggCommitter.java +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkAggCommitter.java @@ -21,9 +21,9 @@ import org.apache.seatunnel.common.utils.SeaTunnelException; import org.apache.seatunnel.connectors.seatunnel.clickhouse.config.FileReaderOption; import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.ClickhouseProxy; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKFileAggCommitInfo; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKFileCommitInfo; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseProxy; import com.clickhouse.client.ClickHouseException; import com.clickhouse.client.ClickHouseRequest; diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkWriter.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkWriter.java index 2abeb046470..e705acc7683 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkWriter.java +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseFileSinkWriter.java @@ -26,10 +26,10 @@ import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException; import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.ClickhouseProxy; import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.ShardRouter; import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.state.CKFileCommitInfo; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSinkState; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseProxy; import org.apache.commons.io.FileUtils; diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseTable.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseTable.java index 546f1f74660..2525caeb3d0 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseTable.java +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/file/ClickhouseTable.java @@ -17,7 +17,7 @@ package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.file; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.DistributedEngine; +import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.DistributedEngine; import lombok.Getter; import lombok.Setter; diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSource.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSource.java index 2cc401dce24..d7c6b438564 100644 --- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSource.java +++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSource.java @@ -17,142 +17,39 @@ package org.apache.seatunnel.connectors.seatunnel.clickhouse.source; -import org.apache.seatunnel.shade.com.typesafe.config.Config; -import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; - -import org.apache.seatunnel.api.common.PrepareFailException; -import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SourceReader; import org.apache.seatunnel.api.source.SourceSplitEnumerator; import org.apache.seatunnel.api.source.SupportColumnProjection; import org.apache.seatunnel.api.source.SupportParallelism; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.common.config.CheckConfigUtil; -import org.apache.seatunnel.common.config.CheckResult; -import org.apache.seatunnel.common.constants.PluginType; -import org.apache.seatunnel.common.utils.ExceptionUtils; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException; import org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSourceState; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil; -import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.TypeConvertUtil; -import com.clickhouse.client.ClickHouseClient; -import com.clickhouse.client.ClickHouseException; -import com.clickhouse.client.ClickHouseFormat; import 
com.clickhouse.client.ClickHouseNode;
-import com.clickhouse.client.ClickHouseResponse;
-import com.google.auto.service.AutoService;
-import com.google.common.collect.ImmutableMap;
 
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.stream.Collectors;
-
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PASSWORD;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SERVER_TIME_ZONE;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SQL;
-import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.USERNAME;
 
-@AutoService(SeaTunnelSource.class)
 public class ClickhouseSource
         implements SeaTunnelSource<SeaTunnelRow, ClickhouseSourceSplit, ClickhouseSourceState>,
                 SupportParallelism,
                 SupportColumnProjection {
 
     private List<ClickHouseNode> servers;
-    private SeaTunnelRowType rowTypeInfo;
+    private CatalogTable catalogTable;
     private String sql;
 
-    @Override
-    public String getPluginName() {
-        return "Clickhouse";
+    public ClickhouseSource(List<ClickHouseNode> servers, CatalogTable catalogTable, String sql) {
+        this.servers = servers;
+        this.catalogTable = catalogTable;
+        this.sql = sql;
     }
 
     @Override
-    public void prepare(Config config) throws PrepareFailException {
-        CheckResult result =
-                CheckConfigUtil.checkAllExists(
-                        config,
-                        HOST.key(),
-                        DATABASE.key(),
-                        SQL.key(),
-                        USERNAME.key(),
-                        PASSWORD.key());
-        if (!result.isSuccess()) {
-            throw new ClickhouseConnectorException(
-                    SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED,
-                    String.format(
-                            "PluginName: %s, PluginType: %s, Message: %s",
-                            getPluginName(), PluginType.SOURCE, result.getMsg()));
-        }
-        Map<String, Object> defaultConfig =
-                ImmutableMap.<String, Object>builder()
-                        .put(SERVER_TIME_ZONE.key(), SERVER_TIME_ZONE.defaultValue())
-                        .build();
-
-        config = config.withFallback(ConfigFactory.parseMap(defaultConfig));
-
-        Map<String, String> customConfig = null;
-
-        if (CheckConfigUtil.isValidParam(config, CLICKHOUSE_CONFIG.key())) {
-            customConfig =
-                    config.getObject(CLICKHOUSE_CONFIG.key()).entrySet().stream()
-                            .collect(
-                                    Collectors.toMap(
-                                            Map.Entry::getKey,
-                                            entrySet ->
-                                                    entrySet.getValue().unwrapped().toString()));
-        }
-
-        servers =
-                ClickhouseUtil.createNodes(
-                        config.getString(HOST.key()),
-                        config.getString(DATABASE.key()),
-                        config.getString(SERVER_TIME_ZONE.key()),
-                        config.getString(USERNAME.key()),
-                        config.getString(PASSWORD.key()),
-                        customConfig);
-
-        sql = config.getString(SQL.key());
-        ClickHouseNode currentServer =
-                servers.get(ThreadLocalRandom.current().nextInt(servers.size()));
-        try (ClickHouseClient client = ClickHouseClient.newInstance(currentServer.getProtocol());
-                ClickHouseResponse response =
-                        client.connect(currentServer)
-                                .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
-                                .query(modifySQLToLimit1(config.getString(SQL.key())))
-                                .executeAndWait()) {
-
-            int columnSize = response.getColumns().size();
-            String[] fieldNames = new String[columnSize];
-            SeaTunnelDataType<?>[] seaTunnelDataTypes = new SeaTunnelDataType[columnSize];
-
-            for (int i = 0; i < columnSize; i++) {
-                fieldNames[i] = response.getColumns().get(i).getColumnName();
-                seaTunnelDataTypes[i] = TypeConvertUtil.convert(response.getColumns().get(i));
-            }
-
-            this.rowTypeInfo = new SeaTunnelRowType(fieldNames, seaTunnelDataTypes);
-
-        } catch (ClickHouseException e) {
-            throw new ClickhouseConnectorException(
-                    SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED,
-                    String.format(
-                            "PluginName: %s, PluginType: %s, Message: %s",
-                            getPluginName(), PluginType.SOURCE, ExceptionUtils.getMessage(e)));
-        }
-    }
-
-    private String modifySQLToLimit1(String sql) {
-        return String.format("SELECT * FROM (%s) s LIMIT 1", sql);
+    public String getPluginName() {
+        return "Clickhouse";
     }
 
     @Override
@@ -161,14 +58,15 @@ public Boundedness getBoundedness() {
     }
 
     @Override
-    public SeaTunnelRowType getProducedType() {
-        return this.rowTypeInfo;
+    public List<CatalogTable> getProducedCatalogTables() {
+        return Collections.singletonList(catalogTable);
     }
 
     @Override
     public SourceReader<SeaTunnelRow, ClickhouseSourceSplit> createReader(
             SourceReader.Context readerContext) throws Exception {
-        return new ClickhouseSourceReader(servers, readerContext, this.rowTypeInfo, sql);
+        return new ClickhouseSourceReader(
+                servers, readerContext, this.catalogTable.getSeaTunnelRowType(), sql);
     }
 
     @Override
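Both the deleted prepare() above and the factory createSource that follows discover the schema the same way: they wrap the user query so that only one row is fetched and read the column metadata from the response. The same probe can be sketched over plain JDBC; the driver URL and table name below are invented for illustration, and the real code uses the com.clickhouse client with RowBinaryWithNamesAndTypes instead.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;

public class SchemaProbe {
    // Same trick as modifySQLToLimit1 in the diff: fetch one row, read metadata.
    static String limitToOneRow(String sql) {
        return String.format("SELECT * FROM (%s) s LIMIT 1", sql);
    }

    public static void main(String[] args) throws SQLException {
        String url = "jdbc:clickhouse://localhost:8123/default"; // assumed endpoint
        try (Connection conn = DriverManager.getConnection(url);
                Statement st = conn.createStatement();
                ResultSet rs = st.executeQuery(limitToOneRow("SELECT * FROM t"))) {
            ResultSetMetaData md = rs.getMetaData();
            for (int i = 1; i <= md.getColumnCount(); i++) {
                System.out.println(md.getColumnName(i) + " : " + md.getColumnTypeName(i));
            }
        }
    }
}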
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceFactory.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceFactory.java
index 4adea4b80ce..bb91d3c05ea 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceFactory.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceFactory.java
@@ -17,13 +17,37 @@
 
 package org.apache.seatunnel.connectors.seatunnel.clickhouse.source;
 
+import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode;
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
 import org.apache.seatunnel.api.configuration.util.OptionRule;
 import org.apache.seatunnel.api.source.SeaTunnelSource;
+import org.apache.seatunnel.api.source.SourceSplit;
+import org.apache.seatunnel.api.table.catalog.CatalogTable;
+import org.apache.seatunnel.api.table.catalog.PhysicalColumn;
+import org.apache.seatunnel.api.table.catalog.TableIdentifier;
+import org.apache.seatunnel.api.table.catalog.TableSchema;
+import org.apache.seatunnel.api.table.connector.TableSource;
 import org.apache.seatunnel.api.table.factory.Factory;
 import org.apache.seatunnel.api.table.factory.TableSourceFactory;
+import org.apache.seatunnel.api.table.factory.TableSourceFactoryContext;
+import org.apache.seatunnel.common.constants.PluginType;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.util.TypeConvertUtil;
 
+import com.clickhouse.client.ClickHouseClient;
+import com.clickhouse.client.ClickHouseColumn;
+import com.clickhouse.client.ClickHouseException;
+import com.clickhouse.client.ClickHouseFormat;
+import com.clickhouse.client.ClickHouseNode;
+import com.clickhouse.client.ClickHouseResponse;
 import com.google.auto.service.AutoService;
 
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
 import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG;
 import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE;
 import static org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST;
@@ -38,6 +62,61 @@ public String factoryIdentifier() {
         return "Clickhouse";
     }
 
+    @Override
+    public <T, SplitT extends SourceSplit, StateT extends Serializable>
+            TableSource<T, SplitT, StateT> createSource(TableSourceFactoryContext context) {
+        ReadonlyConfig readonlyConfig = context.getOptions();
+        List<ClickHouseNode> nodes = ClickhouseUtil.createNodes(readonlyConfig);
+
+        String sql = readonlyConfig.get(SQL);
+        ClickHouseNode currentServer =
+                nodes.get(ThreadLocalRandom.current().nextInt(nodes.size()));
+        try (ClickHouseClient client = ClickHouseClient.newInstance(currentServer.getProtocol());
+                ClickHouseResponse response =
+                        client.connect(currentServer)
+                                .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+                                .query(modifySQLToLimit1(sql))
+                                .executeAndWait()) {
+            TableSchema.Builder builder = TableSchema.builder();
+            List<ClickHouseColumn> columns = response.getColumns();
+            columns.forEach(
+                    column -> {
+                        PhysicalColumn physicalColumn =
+                                PhysicalColumn.of(
+                                        column.getColumnName(),
+                                        TypeConvertUtil.convert(column),
+                                        (long) column.getEstimatedLength(),
+                                        column.getScale(),
+                                        column.isNullable(),
+                                        null,
+                                        null);
+                        builder.column(physicalColumn);
+                    });
+            String catalogName = "clickhouse_catalog";
+            CatalogTable catalogTable =
+                    CatalogTable.of(
+                            TableIdentifier.of(
+                                    catalogName, readonlyConfig.get(DATABASE), "default"),
+                            builder.build(),
+                            Collections.emptyMap(),
+                            Collections.emptyList(),
+                            "",
+                            catalogName);
+            return () ->
+                    (SeaTunnelSource<T, SplitT, StateT>)
+                            new ClickhouseSource(nodes, catalogTable, sql);
+        } catch (ClickHouseException e) {
+            throw new ClickhouseConnectorException(
+                    SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED,
+                    String.format(
+                            "PluginName: %s, PluginType: %s, Message: %s",
+                            factoryIdentifier(), PluginType.SOURCE, e.getMessage()));
+        }
+    }
+
+    private String modifySQLToLimit1(String sql) {
+        return String.format("SELECT * FROM (%s) s LIMIT 1", sql);
+    }
+
     @Override
     public OptionRule optionRule() {
         return OptionRule.builder()
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceReader.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceReader.java
index 591334d9722..3ad0ec041e6 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceReader.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceReader.java
@@ -17,6 +17,7 @@
 
 package org.apache.seatunnel.connectors.seatunnel.clickhouse.source;
 
+import org.apache.seatunnel.api.source.Boundedness;
 import org.apache.seatunnel.api.source.Collector;
 import org.apache.seatunnel.api.source.SourceReader;
 import org.apache.seatunnel.api.table.type.SeaTunnelRow;
@@ -28,6 +29,7 @@
 import com.clickhouse.client.ClickHouseNode;
 import com.clickhouse.client.ClickHouseRequest;
 import com.clickhouse.client.ClickHouseResponse;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,6 +37,7 @@
 import java.util.List;
 import java.util.Random;
 
+@Slf4j
 public class ClickhouseSourceReader implements SourceReader<SeaTunnelRow, ClickhouseSourceSplit> {
 
     private final List<ClickHouseNode> servers;
@@ -43,6 +46,7 @@
     private ClickHouseRequest<?> request;
     private final String sql;
+    private volatile boolean noMoreSplit;
 
     private final List<ClickhouseSourceSplit> splits;
 
@@ -75,31 +79,43 @@ public void close() throws IOException {
 
     @Override
     public void pollNext(Collector<SeaTunnelRow> output) throws Exception {
-        if (!splits.isEmpty()) {
-            try (ClickHouseResponse response = this.request.query(sql).executeAndWait()) {
-                response.stream()
-                        .forEach(
-                                record -> {
-                                    Object[] values =
-                                            new Object[this.rowTypeInfo.getFieldNames().length];
-                                    for (int i = 0; i < record.size(); i++) {
-                                        if (record.getValue(i).isNullOrEmpty()) {
-                                            values[i] = null;
-                                        } else {
-                                            values[i] =
-                                                    TypeConvertUtil.valueUnwrap(
-                                                            this.rowTypeInfo.getFieldType(i),
-                                                            record.getValue(i));
+        synchronized (output.getCheckpointLock()) {
+            if (!splits.isEmpty()) {
+                try (ClickHouseResponse response = this.request.query(sql).executeAndWait()) {
+                    response.stream()
+                            .forEach(
+                                    record -> {
+                                        Object[] values =
+                                                new Object[this.rowTypeInfo.getFieldNames().length];
+                                        for (int i = 0; i < record.size(); i++) {
+                                            if (record.getValue(i).isNullOrEmpty()) {
+                                                values[i] = null;
+                                            } else {
+                                                values[i] =
+                                                        TypeConvertUtil.valueUnwrap(
+                                                                this.rowTypeInfo.getFieldType(i),
+                                                                record.getValue(i));
+                                            }
                                         }
-                                    }
-                                    output.collect(new SeaTunnelRow(values));
-                                });
+                                        output.collect(new SeaTunnelRow(values));
+                                    });
+                }
+                signalNoMoreElement();
+            }
+            if (noMoreSplit
+                    && splits.isEmpty()
+                    && Boundedness.BOUNDED.equals(readerContext.getBoundedness())) {
+                signalNoMoreElement();
             }
-            this.readerContext.signalNoMoreElement();
-            this.splits.clear();
         }
     }
 
+    private void signalNoMoreElement() {
+        log.info("Closed the bounded ClickHouse source");
+        this.readerContext.signalNoMoreElement();
+        this.splits.clear();
+    }
+
     @Override
     public List<ClickhouseSourceSplit> snapshotState(long checkpointId) throws Exception {
         return Collections.emptyList();
@@ -111,7 +127,9 @@ public void addSplits(List<ClickhouseSourceSplit> splits) {
     }
 
     @Override
-    public void handleNoMoreSplits() {}
+    public void handleNoMoreSplits() {
+        noMoreSplit = true;
+    }
 
     @Override
     public void notifyCheckpointComplete(long checkpointId) throws Exception {}
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceSplitEnumerator.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceSplitEnumerator.java
index c0eb4b6c706..f3c1bd0c47b 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceSplitEnumerator.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSourceSplitEnumerator.java
@@ -78,6 +78,7 @@ public void registerReader(int subtaskId) {
             assigned = subtaskId;
             context.assignSplit(subtaskId, new ClickhouseSourceSplit());
         }
+        context.signalNoMoreSplits(subtaskId);
     }
 
     @Override
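The reader and enumerator changes above form a small handshake: the enumerator calls context.signalNoMoreSplits(subtaskId), the reader records that in its volatile noMoreSplit flag, and a bounded reader terminates only once its split list is drained. A toy model of that sequence, using plain Java rather than the SeaTunnel interfaces:

import java.util.ArrayDeque;
import java.util.Queue;

public class NoMoreSplitsModel {
    static class Reader {
        final Queue<String> splits = new ArrayDeque<>();
        volatile boolean noMoreSplit;
        boolean terminated;

        // Enumerator side: context.signalNoMoreSplits(subtaskId) ends up here.
        void handleNoMoreSplits() {
            noMoreSplit = true;
        }

        void pollNext() {
            if (!splits.isEmpty()) {
                System.out.println("read split: " + splits.poll());
            }
            // Terminate only when told there are no more splits AND the queue is empty,
            // mirroring the bounded-source check added in pollNext above.
            if (noMoreSplit && splits.isEmpty()) {
                terminated = true; // stands in for readerContext.signalNoMoreElement()
            }
        }
    }

    public static void main(String[] args) {
        Reader r = new Reader();
        r.splits.add("split-0");
        r.handleNoMoreSplits();
        while (!r.terminated) {
            r.pollNext();
        }
        System.out.println("bounded reader finished");
    }
}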
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseProxy.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseProxy.java
similarity index 98%
rename from seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseProxy.java
rename to seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseProxy.java
index bf0f9a55520..c4178182578 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseProxy.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseProxy.java
@@ -15,14 +15,13 @@
  * limitations under the License.
  */
 
-package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client;
+package org.apache.seatunnel.connectors.seatunnel.clickhouse.util;
 
 import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode;
 import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorErrorCode;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.shard.Shard;
-import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.DistributedEngine;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.file.ClickhouseTable;
 
 import com.clickhouse.client.ClickHouseClient;
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseUtil.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseUtil.java
index f787cf5c8fc..13667d0e407 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseUtil.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/ClickhouseUtil.java
@@ -17,6 +17,9 @@
 
 package org.apache.seatunnel.connectors.seatunnel.clickhouse.util;
 
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig;
+
 import org.apache.commons.collections4.MapUtils;
 import org.apache.commons.lang3.StringUtils;
 
@@ -31,6 +34,16 @@
 public class ClickhouseUtil {
 
+    public static List<ClickHouseNode> createNodes(ReadonlyConfig config) {
+        return createNodes(
+                config.get(ClickhouseConfig.HOST),
+                config.get(ClickhouseConfig.DATABASE),
+                config.get(ClickhouseConfig.SERVER_TIME_ZONE),
+                config.get(ClickhouseConfig.USERNAME),
+                config.get(ClickhouseConfig.PASSWORD),
+                null);
+    }
+
     public static List<ClickHouseNode> createNodes(
             String nodeAddress,
             String database,
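The new createNodes(ReadonlyConfig) overload above centralizes how the connector turns its host option into ClickHouse endpoints. A rough sketch of the comma-separated host:port parsing such a helper implies follows; the exact rules (trimming, defaults, validation) are assumptions here, not the connector's verified behavior, and the real code builds com.clickhouse.client.ClickHouseNode instances instead of this toy type.

import java.util.ArrayList;
import java.util.List;

public class NodeAddressSketch {
    static final class Endpoint {
        final String host;
        final int port;

        Endpoint(String host, int port) {
            this.host = host;
            this.port = port;
        }
    }

    // Split "host1:port1,host2:port2" into individual endpoints.
    static List<Endpoint> parse(String nodeAddress) {
        List<Endpoint> endpoints = new ArrayList<>();
        for (String part : nodeAddress.split(",")) {
            String[] hostPort = part.trim().split(":");
            endpoints.add(new Endpoint(hostPort[0], Integer.parseInt(hostPort[1])));
        }
        return endpoints;
    }

    public static void main(String[] args) {
        for (Endpoint e : parse("ch-1:8123,ch-2:8123")) {
            System.out.println(e.host + " -> " + e.port);
        }
    }
}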
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/DistributedEngine.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/DistributedEngine.java
similarity index 94%
rename from seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/DistributedEngine.java
rename to seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/DistributedEngine.java
index 067f09fdbc2..8974b7cd0c3 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/DistributedEngine.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/DistributedEngine.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.seatunnel.connectors.seatunnel.clickhouse.sink;
+package org.apache.seatunnel.connectors.seatunnel.clickhouse.util;
 
 import lombok.AllArgsConstructor;
 import lombok.Getter;
 
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/tool/IntHolder.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/IntHolder.java
similarity index 94%
rename from seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/tool/IntHolder.java
rename to seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/IntHolder.java
index 02e7be5966d..9913d7a408e 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/tool/IntHolder.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/util/IntHolder.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.seatunnel.connectors.seatunnel.clickhouse.tool;
+package org.apache.seatunnel.connectors.seatunnel.clickhouse.util;
 
 import java.io.Serializable;
 
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseFactoryTest.java b/seatunnel-connectors-v2/connector-clickhouse/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseFactoryTest.java
index e6c50b0611a..d193b53ea72 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseFactoryTest.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseFactoryTest.java
@@ -17,7 +17,7 @@
 
 package org.apache.seatunnel.connectors.seatunnel.clickhouse;
 
-import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.ClickhouseSinkFactory;
+import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.client.ClickhouseSinkFactory;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.sink.file.ClickhouseFileSinkFactory;
 import org.apache.seatunnel.connectors.seatunnel.clickhouse.source.ClickhouseSourceFactory;
 
diff --git a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/AbstractSingleSplitReader.java b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/AbstractSingleSplitReader.java
index 31385d0d470..d8dd6fae1e7 100644
--- a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/AbstractSingleSplitReader.java
+++ b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/AbstractSingleSplitReader.java
@@ -26,13 +26,11 @@ public abstract class AbstractSingleSplitReader<T> implements SourceReader<T, SingleSplit> {
 
-    protected final Object lock = new Object();
-
     protected volatile boolean noMoreSplits = false;
 
     @Override
     public void pollNext(Collector<T> output) throws Exception {
-        synchronized (lock) {
+        synchronized (output.getCheckpointLock()) {
             if 
(noMoreSplits) { return; } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/backend/BackendClient.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/backend/BackendClient.java index 31bdb2a78e7..04f96d2d607 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/backend/BackendClient.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/backend/BackendClient.java @@ -25,7 +25,7 @@ import org.apache.seatunnel.shade.org.apache.thrift.transport.TTransport; import org.apache.seatunnel.shade.org.apache.thrift.transport.TTransportException; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.source.serialization.Routing; @@ -55,7 +55,7 @@ public class BackendClient { private final int socketTimeout; private final int connectTimeout; - public BackendClient(Routing routing, DorisConfig readOptions) { + public BackendClient(Routing routing, DorisSourceConfig readOptions) { this.routing = routing; this.connectTimeout = readOptions.getRequestConnectTimeoutMs(); this.socketTimeout = readOptions.getRequestReadTimeoutMs(); diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java index a7f5eabf63d..324200e5e4d 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java @@ -37,7 +37,6 @@ import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.common.exception.CommonErrorCode; import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; import org.apache.seatunnel.connectors.doris.config.DorisOptions; import org.apache.seatunnel.connectors.doris.datatype.DorisTypeConverterFactory; import org.apache.seatunnel.connectors.doris.datatype.DorisTypeConverterV2; @@ -85,7 +84,7 @@ public class DorisCatalog implements Catalog { private Connection conn; - private DorisConfig dorisConfig; + private String createTableTemplate; private String dorisVersion; @@ -110,9 +109,9 @@ public DorisCatalog( Integer queryPort, String username, String password, - DorisConfig config) { + String createTableTemplate) { this(catalogName, frontEndNodes, queryPort, username, password); - this.dorisConfig = config; + this.createTableTemplate = createTableTemplate; } public DorisCatalog( @@ -121,9 +120,9 @@ public DorisCatalog( Integer queryPort, String username, String password, - DorisConfig config, + String createTableTemplate, String defaultDatabase) { - this(catalogName, frontEndNodes, queryPort, username, password, config); + this(catalogName, frontEndNodes, queryPort, username, password, createTableTemplate); this.defaultDatabase = defaultDatabase; } @@ -414,7 +413,7 @@ public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreI String stmt = 
DorisCatalogUtil.getCreateTableStatement( - dorisConfig.getCreateTableTemplate(), tablePath, table, typeConverter); + createTableTemplate, tablePath, table, typeConverter); try (Statement statement = conn.createStatement()) { statement.execute(stmt); } catch (SQLException e) { @@ -510,7 +509,7 @@ public PreviewResult previewAction( checkArgument(catalogTable.isPresent(), "CatalogTable cannot be null"); return new SQLPreviewResult( DorisCatalogUtil.getCreateTableStatement( - dorisConfig.getCreateTableTemplate(), + createTableTemplate, tablePath, catalogTable.get(), // used for test when typeConverter is null diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalogFactory.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalogFactory.java index 1071b52f05a..7fd1da603e2 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalogFactory.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalogFactory.java @@ -22,11 +22,14 @@ import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.factory.CatalogFactory; import org.apache.seatunnel.api.table.factory.Factory; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSinkOptions; import com.google.auto.service.AutoService; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.IDENTIFIER; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE; + @AutoService(Factory.class) public class DorisCatalogFactory implements CatalogFactory { @@ -38,13 +41,13 @@ public Catalog createCatalog(String catalogName, ReadonlyConfig options) { options.get(DorisOptions.QUERY_PORT), options.get(DorisOptions.USERNAME), options.get(DorisOptions.PASSWORD), - DorisConfig.of(options), - options.get(DorisOptions.DEFAULT_DATABASE)); + options.get(SAVE_MODE_CREATE_TEMPLATE), + options.get(DorisSinkOptions.DEFAULT_DATABASE)); } @Override public String factoryIdentifier() { - return DorisConfig.IDENTIFIER; + return IDENTIFIER; } @Override diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisConfig.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisConfig.java deleted file mode 100644 index f7155e8a647..00000000000 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisConfig.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.seatunnel.connectors.doris.config; - -import org.apache.seatunnel.shade.com.typesafe.config.Config; - -import org.apache.seatunnel.api.configuration.ReadonlyConfig; - -import lombok.Getter; -import lombok.Setter; -import lombok.ToString; - -import java.io.Serializable; -import java.util.Map; -import java.util.Properties; - -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DATABASE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_BATCH_SIZE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_DESERIALIZE_ARROW_ASYNC; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_DESERIALIZE_QUEUE_SIZE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_EXEC_MEM_LIMIT; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_FILTER_QUERY; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_READ_FIELD; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_REQUEST_CONNECT_TIMEOUT_MS; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_REQUEST_QUERY_TIMEOUT_S; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_REQUEST_READ_TIMEOUT_MS; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_REQUEST_RETRIES; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_SINK_CONFIG_PREFIX; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_TABLET_SIZE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.FENODES; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.NEEDS_UNSUPPORTED_TYPE_CASTING; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.PASSWORD; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.QUERY_PORT; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SAVE_MODE_CREATE_TEMPLATE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_BUFFER_COUNT; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_BUFFER_SIZE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_CHECK_INTERVAL; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_ENABLE_2PC; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_ENABLE_DELETE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_LABEL_PREFIX; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.SINK_MAX_RETRIES; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.USERNAME; - -@Setter -@Getter -@ToString -public class DorisConfig implements Serializable { - - public static final String IDENTIFIER = "Doris"; - - // common option - private String frontends; - private String database; - private String table; - private String username; - private String password; - private Integer queryPort; - private int batchSize; - - // source option - private String readField; - private String filterQuery; - private Integer tabletSize; - private Integer requestConnectTimeoutMs; - private Integer requestReadTimeoutMs; - private Integer 
requestQueryTimeoutS; - private Integer requestRetries; - private Boolean deserializeArrowAsync; - private int deserializeQueueSize; - private Long execMemLimit; - private boolean useOldApi; - - // sink option - private Boolean enable2PC; - private Boolean enableDelete; - private String labelPrefix; - private Integer checkInterval; - private Integer maxRetries; - private Integer bufferSize; - private Integer bufferCount; - private Properties streamLoadProps; - private boolean needsUnsupportedTypeCasting; - - // create table option - private String createTableTemplate; - - public static DorisConfig of(Config pluginConfig) { - return of(ReadonlyConfig.fromConfig(pluginConfig)); - } - - public static DorisConfig of(ReadonlyConfig config) { - - DorisConfig dorisConfig = new DorisConfig(); - - // common option - dorisConfig.setFrontends(config.get(FENODES)); - dorisConfig.setUsername(config.get(USERNAME)); - dorisConfig.setPassword(config.get(PASSWORD)); - dorisConfig.setQueryPort(config.get(QUERY_PORT)); - dorisConfig.setStreamLoadProps(parseStreamLoadProperties(config)); - dorisConfig.setDatabase(config.get(DATABASE)); - dorisConfig.setTable(config.get(TABLE)); - - // source option - dorisConfig.setReadField(config.get(DORIS_READ_FIELD)); - dorisConfig.setFilterQuery(config.get(DORIS_FILTER_QUERY)); - dorisConfig.setTabletSize(config.get(DORIS_TABLET_SIZE)); - dorisConfig.setRequestConnectTimeoutMs(config.get(DORIS_REQUEST_CONNECT_TIMEOUT_MS)); - dorisConfig.setRequestQueryTimeoutS(config.get(DORIS_REQUEST_QUERY_TIMEOUT_S)); - dorisConfig.setRequestReadTimeoutMs(config.get(DORIS_REQUEST_READ_TIMEOUT_MS)); - dorisConfig.setRequestRetries(config.get(DORIS_REQUEST_RETRIES)); - dorisConfig.setDeserializeArrowAsync(config.get(DORIS_DESERIALIZE_ARROW_ASYNC)); - dorisConfig.setDeserializeQueueSize(config.get(DORIS_DESERIALIZE_QUEUE_SIZE)); - dorisConfig.setBatchSize(config.get(DORIS_BATCH_SIZE)); - dorisConfig.setExecMemLimit(config.get(DORIS_EXEC_MEM_LIMIT)); - - // sink option - dorisConfig.setEnable2PC(config.get(SINK_ENABLE_2PC)); - dorisConfig.setLabelPrefix(config.get(SINK_LABEL_PREFIX)); - dorisConfig.setCheckInterval(config.get(SINK_CHECK_INTERVAL)); - dorisConfig.setMaxRetries(config.get(SINK_MAX_RETRIES)); - dorisConfig.setBufferSize(config.get(SINK_BUFFER_SIZE)); - dorisConfig.setBufferCount(config.get(SINK_BUFFER_COUNT)); - dorisConfig.setEnableDelete(config.get(SINK_ENABLE_DELETE)); - dorisConfig.setNeedsUnsupportedTypeCasting(config.get(NEEDS_UNSUPPORTED_TYPE_CASTING)); - - // create table option - dorisConfig.setCreateTableTemplate(config.get(SAVE_MODE_CREATE_TEMPLATE)); - - return dorisConfig; - } - - private static Properties parseStreamLoadProperties(ReadonlyConfig config) { - Properties streamLoadProps = new Properties(); - if (config.getOptional(DORIS_SINK_CONFIG_PREFIX).isPresent()) { - Map map = config.getOptional(DORIS_SINK_CONFIG_PREFIX).get(); - map.forEach( - (key, value) -> { - streamLoadProps.put(key.toLowerCase(), value); - }); - } - return streamLoadProps; - } -} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisOptions.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisOptions.java index ddf1195b6ed..bcdf24c9d7b 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisOptions.java +++ 
b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisOptions.java
@@ -20,32 +20,12 @@
 import org.apache.seatunnel.api.configuration.Option;
 import org.apache.seatunnel.api.configuration.Options;
 import org.apache.seatunnel.api.configuration.util.OptionRule;
-import org.apache.seatunnel.api.sink.DataSaveMode;
-import org.apache.seatunnel.api.sink.SaveModePlaceHolder;
-import org.apache.seatunnel.api.sink.SchemaSaveMode;
-
-import java.util.Map;
-
-import static org.apache.seatunnel.api.sink.SinkCommonOptions.MULTI_TABLE_SINK_REPLICA;

 public interface DorisOptions {

-    int DORIS_TABLET_SIZE_MIN = 1;
-    int DORIS_TABLET_SIZE_DEFAULT = Integer.MAX_VALUE;
-    int DORIS_REQUEST_CONNECT_TIMEOUT_MS_DEFAULT = 30 * 1000;
-    int DORIS_REQUEST_READ_TIMEOUT_MS_DEFAULT = 30 * 1000;
-    int DORIS_REQUEST_QUERY_TIMEOUT_S_DEFAULT = 3600;
-    int DORIS_REQUEST_RETRIES_DEFAULT = 3;
-    Boolean DORIS_DESERIALIZE_ARROW_ASYNC_DEFAULT = false;
-    int DORIS_DESERIALIZE_QUEUE_SIZE_DEFAULT = 64;
-    int DORIS_BATCH_SIZE_DEFAULT = 1024;
-    long DORIS_EXEC_MEM_LIMIT_DEFAULT = 2147483648L;
-    int DEFAULT_SINK_CHECK_INTERVAL = 10000;
-    int DEFAULT_SINK_MAX_RETRIES = 3;
-    int DEFAULT_SINK_BUFFER_SIZE = 256 * 1024;
-    int DEFAULT_SINK_BUFFER_COUNT = 3;
-
+    String IDENTIFIER = "Doris";
     String DORIS_DEFAULT_CLUSTER = "default_cluster";
+    int DORIS_BATCH_SIZE_DEFAULT = 1024;

     // common option
     Option<String> FENODES =
@@ -72,6 +52,7 @@ public interface DorisOptions {
                     .stringType()
                     .noDefaultValue()
                     .withDescription("the doris user name.");
+
     Option<String> PASSWORD =
             Options.key("password")
                     .stringType()
@@ -79,202 +60,17 @@ public interface DorisOptions {
                     .withDescription("the doris password.");

     Option<String> TABLE =
-            Options.key("table")
-                    .stringType()
-                    .noDefaultValue()
-                    .withDescription("the doris table name.");
+            Options.key("table").stringType().noDefaultValue().withDescription("the doris table name.");
+
     Option<String> DATABASE =
-            Options.key("database")
-                    .stringType()
-                    .noDefaultValue()
-                    .withDescription("the doris database name.");
+            Options.key("database").stringType().noDefaultValue().withDescription("the doris database name.");
+
     Option<Integer> DORIS_BATCH_SIZE =
             Options.key("doris.batch.size")
                     .intType()
                     .defaultValue(DORIS_BATCH_SIZE_DEFAULT)
                     .withDescription("the batch size of the doris read/write.");

-    // source config options
-    Option<String> DORIS_READ_FIELD =
-            Options.key("doris.read.field")
-                    .stringType()
-                    .noDefaultValue()
-                    .withDescription(
-                            "List of column names in the Doris table, separated by commas");
-    Option<String> DORIS_FILTER_QUERY =
-            Options.key("doris.filter.query")
-                    .stringType()
-                    .noDefaultValue()
-                    .withDescription(
-                            "Filter expression of the query, which is transparently transmitted to Doris. 
Doris uses this expression to complete source-side data filtering"); - Option DORIS_TABLET_SIZE = - Options.key("doris.request.tablet.size") - .intType() - .defaultValue(DORIS_TABLET_SIZE_DEFAULT) - .withDescription(""); - Option DORIS_REQUEST_CONNECT_TIMEOUT_MS = - Options.key("doris.request.connect.timeout.ms") - .intType() - .defaultValue(DORIS_REQUEST_CONNECT_TIMEOUT_MS_DEFAULT) - .withDescription(""); - Option DORIS_REQUEST_READ_TIMEOUT_MS = - Options.key("doris.request.read.timeout.ms") - .intType() - .defaultValue(DORIS_REQUEST_READ_TIMEOUT_MS_DEFAULT) - .withDescription(""); - Option DORIS_REQUEST_QUERY_TIMEOUT_S = - Options.key("doris.request.query.timeout.s") - .intType() - .defaultValue(DORIS_REQUEST_QUERY_TIMEOUT_S_DEFAULT) - .withDescription(""); - Option DORIS_REQUEST_RETRIES = - Options.key("doris.request.retries") - .intType() - .defaultValue(DORIS_REQUEST_RETRIES_DEFAULT) - .withDescription(""); - Option DORIS_DESERIALIZE_ARROW_ASYNC = - Options.key("doris.deserialize.arrow.async") - .booleanType() - .defaultValue(DORIS_DESERIALIZE_ARROW_ASYNC_DEFAULT) - .withDescription(""); - Option DORIS_DESERIALIZE_QUEUE_SIZE = - Options.key("doris.request.retriesdoris.deserialize.queue.size") - .intType() - .defaultValue(DORIS_DESERIALIZE_QUEUE_SIZE_DEFAULT) - .withDescription(""); - - Option DORIS_EXEC_MEM_LIMIT = - Options.key("doris.exec.mem.limit") - .longType() - .defaultValue(DORIS_EXEC_MEM_LIMIT_DEFAULT) - .withDescription(""); - - // sink config options - Option SINK_ENABLE_2PC = - Options.key("sink.enable-2pc") - .booleanType() - .defaultValue(false) - .withDescription("enable 2PC while loading"); - - Option SINK_CHECK_INTERVAL = - Options.key("sink.check-interval") - .intType() - .defaultValue(DEFAULT_SINK_CHECK_INTERVAL) - .withDescription("check exception with the interval while loading"); - Option SINK_MAX_RETRIES = - Options.key("sink.max-retries") - .intType() - .defaultValue(DEFAULT_SINK_MAX_RETRIES) - .withDescription("the max retry times if writing records to database failed."); - Option SINK_BUFFER_SIZE = - Options.key("sink.buffer-size") - .intType() - .defaultValue(DEFAULT_SINK_BUFFER_SIZE) - .withDescription("the buffer size to cache data for stream load."); - Option SINK_BUFFER_COUNT = - Options.key("sink.buffer-count") - .intType() - .defaultValue(DEFAULT_SINK_BUFFER_COUNT) - .withDescription("the buffer count to cache data for stream load."); - Option SINK_LABEL_PREFIX = - Options.key("sink.label-prefix") - .stringType() - .defaultValue("") - .withDescription("the unique label prefix."); - Option SINK_ENABLE_DELETE = - Options.key("sink.enable-delete") - .booleanType() - .defaultValue(false) - .withDescription("whether to enable the delete function"); - - Option> DORIS_SINK_CONFIG_PREFIX = - Options.key("doris.config") - .mapType() - .noDefaultValue() - .withDescription( - "The parameter of the Stream Load data_desc. 
" - + "The way to specify the parameter is to add the prefix `doris.config` to the original load parameter name "); - - Option DEFAULT_DATABASE = - Options.key("default-database") - .stringType() - .defaultValue("information_schema") - .withDescription(""); - - Option SCHEMA_SAVE_MODE = - Options.key("schema_save_mode") - .enumType(SchemaSaveMode.class) - .defaultValue(SchemaSaveMode.CREATE_SCHEMA_WHEN_NOT_EXIST) - .withDescription("schema_save_mode"); - - Option DATA_SAVE_MODE = - Options.key("data_save_mode") - .enumType(DataSaveMode.class) - .defaultValue(DataSaveMode.APPEND_DATA) - .withDescription("data_save_mode"); - - Option CUSTOM_SQL = - Options.key("custom_sql").stringType().noDefaultValue().withDescription("custom_sql"); - - Option NEEDS_UNSUPPORTED_TYPE_CASTING = - Options.key("needs_unsupported_type_casting") - .booleanType() - .defaultValue(false) - .withDescription( - "Whether to enable the unsupported type casting, such as Decimal64 to Double"); - - // create table - Option SAVE_MODE_CREATE_TEMPLATE = - Options.key("save_mode_create_template") - .stringType() - .defaultValue( - "CREATE TABLE IF NOT EXISTS `" - + SaveModePlaceHolder.DATABASE.getPlaceHolder() - + "`.`" - + SaveModePlaceHolder.TABLE.getPlaceHolder() - + "` (\n" - + SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder() - + ",\n" - + SaveModePlaceHolder.ROWTYPE_FIELDS.getPlaceHolder() - + "\n" - + ") ENGINE=OLAP\n" - + " UNIQUE KEY (" - + SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder() - + ")\n" - + "DISTRIBUTED BY HASH (" - + SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder() - + ")\n " - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\",\n" - + "\"disable_auto_compaction\" = \"false\"\n" - + ")") - .withDescription("Create table statement template, used to create Doris table"); - - OptionRule.Builder SINK_RULE = - OptionRule.builder() - .required( - FENODES, - USERNAME, - PASSWORD, - SINK_LABEL_PREFIX, - DORIS_SINK_CONFIG_PREFIX, - DATA_SAVE_MODE, - SCHEMA_SAVE_MODE) - .optional( - DATABASE, - TABLE, - TABLE_IDENTIFIER, - QUERY_PORT, - DORIS_BATCH_SIZE, - SINK_ENABLE_2PC, - SINK_ENABLE_DELETE, - MULTI_TABLE_SINK_REPLICA, - SAVE_MODE_CREATE_TEMPLATE, - NEEDS_UNSUPPORTED_TYPE_CASTING) - .conditional(DATA_SAVE_MODE, DataSaveMode.CUSTOM_PROCESSING, CUSTOM_SQL); - OptionRule.Builder CATALOG_RULE = OptionRule.builder().required(FENODES, QUERY_PORT, USERNAME, PASSWORD); } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSinkConfig.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSinkConfig.java new file mode 100644 index 00000000000..8f0d948042f --- /dev/null +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSinkConfig.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.doris.config; + +import org.apache.seatunnel.shade.com.typesafe.config.Config; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; + +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +import java.io.Serializable; +import java.util.Map; +import java.util.Properties; + +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DATABASE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_BATCH_SIZE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.FENODES; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.PASSWORD; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.QUERY_PORT; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.USERNAME; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.DORIS_SINK_CONFIG_PREFIX; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.NEEDS_UNSUPPORTED_TYPE_CASTING; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_BUFFER_COUNT; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_BUFFER_SIZE; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_CHECK_INTERVAL; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_ENABLE_2PC; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_ENABLE_DELETE; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_LABEL_PREFIX; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.SINK_MAX_RETRIES; + +@Setter +@Getter +@ToString +public class DorisSinkConfig implements Serializable { + + // common option + private String frontends; + private String database; + private String table; + private String username; + private String password; + private Integer queryPort; + private int batchSize; + + // sink option + private Boolean enable2PC; + private Boolean enableDelete; + private String labelPrefix; + private Integer checkInterval; + private Integer maxRetries; + private Integer bufferSize; + private Integer bufferCount; + private Properties streamLoadProps; + private boolean needsUnsupportedTypeCasting; + + // create table option + private String createTableTemplate; + + public static DorisSinkConfig of(Config pluginConfig) { + return of(ReadonlyConfig.fromConfig(pluginConfig)); + } + + public static DorisSinkConfig of(ReadonlyConfig config) { + + DorisSinkConfig dorisSinkConfig = new DorisSinkConfig(); + + // common option + dorisSinkConfig.setFrontends(config.get(FENODES)); + dorisSinkConfig.setUsername(config.get(USERNAME)); + dorisSinkConfig.setPassword(config.get(PASSWORD)); + dorisSinkConfig.setQueryPort(config.get(QUERY_PORT)); + 
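+        // Entries under the `doris.config` map option are forwarded as Doris Stream Load
+        // request properties, with keys lower-cased; see parseStreamLoadProperties below.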
dorisSinkConfig.setStreamLoadProps(parseStreamLoadProperties(config)); + dorisSinkConfig.setDatabase(config.get(DATABASE)); + dorisSinkConfig.setTable(config.get(TABLE)); + dorisSinkConfig.setBatchSize(config.get(DORIS_BATCH_SIZE)); + + // sink option + dorisSinkConfig.setEnable2PC(config.get(SINK_ENABLE_2PC)); + dorisSinkConfig.setLabelPrefix(config.get(SINK_LABEL_PREFIX)); + dorisSinkConfig.setCheckInterval(config.get(SINK_CHECK_INTERVAL)); + dorisSinkConfig.setMaxRetries(config.get(SINK_MAX_RETRIES)); + dorisSinkConfig.setBufferSize(config.get(SINK_BUFFER_SIZE)); + dorisSinkConfig.setBufferCount(config.get(SINK_BUFFER_COUNT)); + dorisSinkConfig.setEnableDelete(config.get(SINK_ENABLE_DELETE)); + dorisSinkConfig.setNeedsUnsupportedTypeCasting(config.get(NEEDS_UNSUPPORTED_TYPE_CASTING)); + + // create table option + dorisSinkConfig.setCreateTableTemplate(config.get(SAVE_MODE_CREATE_TEMPLATE)); + + return dorisSinkConfig; + } + + private static Properties parseStreamLoadProperties(ReadonlyConfig config) { + Properties streamLoadProps = new Properties(); + if (config.getOptional(DORIS_SINK_CONFIG_PREFIX).isPresent()) { + Map map = config.getOptional(DORIS_SINK_CONFIG_PREFIX).get(); + map.forEach( + (key, value) -> { + streamLoadProps.put(key.toLowerCase(), value); + }); + } + return streamLoadProps; + } +} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSinkOptions.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSinkOptions.java new file mode 100644 index 00000000000..372418d12a4 --- /dev/null +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSinkOptions.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.doris.config; + +import org.apache.seatunnel.api.configuration.Option; +import org.apache.seatunnel.api.configuration.Options; +import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.sink.DataSaveMode; +import org.apache.seatunnel.api.sink.SaveModePlaceHolder; +import org.apache.seatunnel.api.sink.SchemaSaveMode; + +import java.util.Map; + +import static org.apache.seatunnel.api.sink.SinkCommonOptions.MULTI_TABLE_SINK_REPLICA; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DATABASE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_BATCH_SIZE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.FENODES; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.PASSWORD; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.QUERY_PORT; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE_IDENTIFIER; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.USERNAME; + +public interface DorisSinkOptions { + + int DEFAULT_SINK_CHECK_INTERVAL = 10000; + int DEFAULT_SINK_MAX_RETRIES = 3; + int DEFAULT_SINK_BUFFER_SIZE = 256 * 1024; + int DEFAULT_SINK_BUFFER_COUNT = 3; + + Option SINK_ENABLE_2PC = + Options.key("sink.enable-2pc") + .booleanType() + .defaultValue(false) + .withDescription("enable 2PC while loading"); + + Option SINK_CHECK_INTERVAL = + Options.key("sink.check-interval") + .intType() + .defaultValue(DEFAULT_SINK_CHECK_INTERVAL) + .withDescription("check exception with the interval while loading"); + Option SINK_MAX_RETRIES = + Options.key("sink.max-retries") + .intType() + .defaultValue(DEFAULT_SINK_MAX_RETRIES) + .withDescription("the max retry times if writing records to database failed."); + Option SINK_BUFFER_SIZE = + Options.key("sink.buffer-size") + .intType() + .defaultValue(DEFAULT_SINK_BUFFER_SIZE) + .withDescription("the buffer size to cache data for stream load."); + Option SINK_BUFFER_COUNT = + Options.key("sink.buffer-count") + .intType() + .defaultValue(DEFAULT_SINK_BUFFER_COUNT) + .withDescription("the buffer count to cache data for stream load."); + Option SINK_LABEL_PREFIX = + Options.key("sink.label-prefix") + .stringType() + .defaultValue("") + .withDescription("the unique label prefix."); + Option SINK_ENABLE_DELETE = + Options.key("sink.enable-delete") + .booleanType() + .defaultValue(false) + .withDescription("whether to enable the delete function"); + + Option> DORIS_SINK_CONFIG_PREFIX = + Options.key("doris.config") + .mapType() + .noDefaultValue() + .withDescription( + "The parameter of the Stream Load data_desc. 
" + + "The way to specify the parameter is to add the prefix `doris.config` to the original load parameter name "); + + Option DEFAULT_DATABASE = + Options.key("default-database") + .stringType() + .defaultValue("information_schema") + .withDescription(""); + + Option SCHEMA_SAVE_MODE = + Options.key("schema_save_mode") + .enumType(SchemaSaveMode.class) + .defaultValue(SchemaSaveMode.CREATE_SCHEMA_WHEN_NOT_EXIST) + .withDescription("schema_save_mode"); + + Option DATA_SAVE_MODE = + Options.key("data_save_mode") + .enumType(DataSaveMode.class) + .defaultValue(DataSaveMode.APPEND_DATA) + .withDescription("data_save_mode"); + + Option CUSTOM_SQL = + Options.key("custom_sql").stringType().noDefaultValue().withDescription("custom_sql"); + + Option NEEDS_UNSUPPORTED_TYPE_CASTING = + Options.key("needs_unsupported_type_casting") + .booleanType() + .defaultValue(false) + .withDescription( + "Whether to enable the unsupported type casting, such as Decimal64 to Double"); + + // create table + Option SAVE_MODE_CREATE_TEMPLATE = + Options.key("save_mode_create_template") + .stringType() + .defaultValue( + "CREATE TABLE IF NOT EXISTS `" + + SaveModePlaceHolder.DATABASE.getPlaceHolder() + + "`.`" + + SaveModePlaceHolder.TABLE.getPlaceHolder() + + "` (\n" + + SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder() + + ",\n" + + SaveModePlaceHolder.ROWTYPE_FIELDS.getPlaceHolder() + + "\n" + + ") ENGINE=OLAP\n" + + " UNIQUE KEY (" + + SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder() + + ")\n" + + "DISTRIBUTED BY HASH (" + + SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder() + + ")\n " + + "PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\",\n" + + "\"disable_auto_compaction\" = \"false\"\n" + + ")") + .withDescription("Create table statement template, used to create Doris table"); + + OptionRule.Builder SINK_RULE = + OptionRule.builder() + .required( + FENODES, + USERNAME, + PASSWORD, + SINK_LABEL_PREFIX, + DORIS_SINK_CONFIG_PREFIX, + DATA_SAVE_MODE, + SCHEMA_SAVE_MODE) + .optional( + DATABASE, + TABLE, + TABLE_IDENTIFIER, + QUERY_PORT, + DORIS_BATCH_SIZE, + SINK_ENABLE_2PC, + SINK_ENABLE_DELETE, + MULTI_TABLE_SINK_REPLICA, + SAVE_MODE_CREATE_TEMPLATE, + NEEDS_UNSUPPORTED_TYPE_CASTING) + .conditional(DATA_SAVE_MODE, DataSaveMode.CUSTOM_PROCESSING, CUSTOM_SQL); +} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSourceConfig.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSourceConfig.java new file mode 100644 index 00000000000..999f8fbfeaa --- /dev/null +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSourceConfig.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.doris.config; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; + +import lombok.Data; +import lombok.experimental.SuperBuilder; + +import java.io.Serializable; +import java.util.List; + +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.FENODES; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.PASSWORD; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.QUERY_PORT; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.USERNAME; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_DESERIALIZE_ARROW_ASYNC; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_DESERIALIZE_QUEUE_SIZE; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_REQUEST_CONNECT_TIMEOUT_MS; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_REQUEST_QUERY_TIMEOUT_S; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_REQUEST_READ_TIMEOUT_MS; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_REQUEST_RETRIES; + +@Data +@SuperBuilder +public class DorisSourceConfig implements Serializable { + + private String frontends; + private Integer queryPort; + private String username; + private String password; + private Integer requestConnectTimeoutMs; + private Integer requestReadTimeoutMs; + private Integer requestQueryTimeoutS; + private Integer requestRetries; + private Boolean deserializeArrowAsync; + private int deserializeQueueSize; + private boolean useOldApi; + private List tableConfigList; + + public static DorisSourceConfig of(ReadonlyConfig config) { + DorisSourceConfigBuilder builder = DorisSourceConfig.builder(); + builder.tableConfigList(DorisTableConfig.of(config)); + builder.frontends(config.get(FENODES)); + builder.queryPort(config.get(QUERY_PORT)); + builder.username(config.get(USERNAME)); + builder.password(config.get(PASSWORD)); + builder.requestConnectTimeoutMs(config.get(DORIS_REQUEST_CONNECT_TIMEOUT_MS)); + builder.requestReadTimeoutMs(config.get(DORIS_REQUEST_READ_TIMEOUT_MS)); + builder.requestQueryTimeoutS(config.get(DORIS_REQUEST_QUERY_TIMEOUT_S)); + builder.requestRetries(config.get(DORIS_REQUEST_RETRIES)); + builder.deserializeArrowAsync(config.get(DORIS_DESERIALIZE_ARROW_ASYNC)); + builder.deserializeQueueSize(config.get(DORIS_DESERIALIZE_QUEUE_SIZE)); + return builder.build(); + } +} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSourceOptions.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSourceOptions.java new file mode 100644 index 00000000000..2ee852ffccc --- /dev/null +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisSourceOptions.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.doris.config; + +import org.apache.seatunnel.api.configuration.Option; +import org.apache.seatunnel.api.configuration.Options; + +import java.util.List; + +public interface DorisSourceOptions { + + int DORIS_TABLET_SIZE_MIN = 1; + int DORIS_TABLET_SIZE_DEFAULT = Integer.MAX_VALUE; + int DORIS_REQUEST_CONNECT_TIMEOUT_MS_DEFAULT = 30 * 1000; + int DORIS_REQUEST_READ_TIMEOUT_MS_DEFAULT = 30 * 1000; + int DORIS_REQUEST_QUERY_TIMEOUT_S_DEFAULT = 3600; + int DORIS_REQUEST_RETRIES_DEFAULT = 3; + Boolean DORIS_DESERIALIZE_ARROW_ASYNC_DEFAULT = false; + int DORIS_DESERIALIZE_QUEUE_SIZE_DEFAULT = 64; + long DORIS_EXEC_MEM_LIMIT_DEFAULT = 2147483648L; + + Option> TABLE_LIST = + Options.key("table_list") + .listType(DorisTableConfig.class) + .noDefaultValue() + .withDescription("table list config."); + + Option DORIS_READ_FIELD = + Options.key("doris.read.field") + .stringType() + .noDefaultValue() + .withDescription( + "List of column names in the Doris table, separated by commas"); + Option DORIS_FILTER_QUERY = + Options.key("doris.filter.query") + .stringType() + .noDefaultValue() + .withDescription( + "Filter expression of the query, which is transparently transmitted to Doris. 
Doris uses this expression to complete source-side data filtering");
+
+    Option<Integer> DORIS_TABLET_SIZE =
+            Options.key("doris.request.tablet.size")
+                    .intType()
+                    .defaultValue(DORIS_TABLET_SIZE_DEFAULT)
+                    .withDescription(
+                            "the number of Doris tablets corresponding to one partition; the smaller the value, the more partitions are generated");
+
+    Option<Integer> DORIS_REQUEST_CONNECT_TIMEOUT_MS =
+            Options.key("doris.request.connect.timeout.ms")
+                    .intType()
+                    .defaultValue(DORIS_REQUEST_CONNECT_TIMEOUT_MS_DEFAULT)
+                    .withDescription(
+                            "the connect timeout of requests sent to Doris FE, in milliseconds");
+
+    Option<Integer> DORIS_REQUEST_READ_TIMEOUT_MS =
+            Options.key("doris.request.read.timeout.ms")
+                    .intType()
+                    .defaultValue(DORIS_REQUEST_READ_TIMEOUT_MS_DEFAULT)
+                    .withDescription(
+                            "the read timeout of requests sent to Doris FE, in milliseconds");
+
+    Option<Integer> DORIS_REQUEST_QUERY_TIMEOUT_S =
+            Options.key("doris.request.query.timeout.s")
+                    .intType()
+                    .defaultValue(DORIS_REQUEST_QUERY_TIMEOUT_S_DEFAULT)
+                    .withDescription("the timeout of a scan query against Doris, in seconds");
+
+    Option<Integer> DORIS_REQUEST_RETRIES =
+            Options.key("doris.request.retries")
+                    .intType()
+                    .defaultValue(DORIS_REQUEST_RETRIES_DEFAULT)
+                    .withDescription("the number of retries for requests sent to Doris FE");
+
+    Option<Boolean> DORIS_DESERIALIZE_ARROW_ASYNC =
+            Options.key("doris.deserialize.arrow.async")
+                    .booleanType()
+                    .defaultValue(DORIS_DESERIALIZE_ARROW_ASYNC_DEFAULT)
+                    .withDescription("whether to deserialize the Arrow result batches asynchronously");
+
+    // NOTE: the key below carries over a historical typo (a stray "doris.request.retries"
+    // prefix); it is left unchanged so existing configs keep working.
+    Option<Integer> DORIS_DESERIALIZE_QUEUE_SIZE =
+            Options.key("doris.request.retriesdoris.deserialize.queue.size")
+                    .intType()
+                    .defaultValue(DORIS_DESERIALIZE_QUEUE_SIZE_DEFAULT)
+                    .withDescription(
+                            "the internal queue size used for asynchronous Arrow deserialization");
+
+    Option<Long> DORIS_EXEC_MEM_LIMIT =
+            Options.key("doris.exec.mem.limit")
+                    .longType()
+                    .defaultValue(DORIS_EXEC_MEM_LIMIT_DEFAULT)
+                    .withDescription(
+                            "the memory limit of a single query, in bytes (default 2GB)");
+}
diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisTableConfig.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisTableConfig.java
new file mode 100644
index 00000000000..624d25636b2
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/config/DorisTableConfig.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.doris.config;
+
+import org.apache.seatunnel.shade.com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import org.apache.seatunnel.shade.com.fasterxml.jackson.annotation.JsonProperty;
+
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
+
+import org.apache.commons.lang3.StringUtils;
+
+import lombok.Builder;
+import lombok.Data;
+import lombok.experimental.Tolerate;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DATABASE;
+import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_BATCH_SIZE;
+import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE;
+import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_EXEC_MEM_LIMIT;
+import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_FILTER_QUERY;
+import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_READ_FIELD;
+import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_TABLET_SIZE;
+import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.TABLE_LIST;
+
+@Data
+@Builder
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class DorisTableConfig implements Serializable {
+
+    @JsonProperty("table")
+    private String table;
+
+    @JsonProperty("database")
+    private String database;
+
+    @JsonProperty("doris.read.field")
+    private String readField;
+
+    @JsonProperty("doris.filter.query")
+    private String filterQuery;
+
+    @JsonProperty("doris.batch.size")
+    private int batchSize;
+
+    @JsonProperty("doris.request.tablet.size")
+    private int tabletSize;
+
+    @JsonProperty("doris.exec.mem.limit")
+    private long execMemLimit;
+
+    @Tolerate
+    public DorisTableConfig() {}
+
+    public static List<DorisTableConfig> of(ReadonlyConfig connectorConfig) {
+        List<DorisTableConfig> tableList;
+        if (connectorConfig.getOptional(TABLE_LIST).isPresent()) {
+            tableList = connectorConfig.get(TABLE_LIST);
+        } else {
+            // fall back to the flat single-table options when no `table_list` is configured
+            DorisTableConfig tableProperty =
+                    DorisTableConfig.builder()
+                            .table(connectorConfig.get(TABLE))
+                            .database(connectorConfig.get(DATABASE))
+                            .readField(connectorConfig.get(DORIS_READ_FIELD))
+                            .filterQuery(connectorConfig.get(DORIS_FILTER_QUERY))
+                            .batchSize(connectorConfig.get(DORIS_BATCH_SIZE))
+                            .tabletSize(connectorConfig.get(DORIS_TABLET_SIZE))
+                            .execMemLimit(connectorConfig.get(DORIS_EXEC_MEM_LIMIT))
+                            .build();
+            tableList = Collections.singletonList(tableProperty);
+        }
+
+        if (tableList.size() > 1) {
+            List<String> tableIds =
+                    tableList.stream()
+                            .map(DorisTableConfig::getTableIdentifier)
+                            .collect(Collectors.toList());
+            Set<String> tableIdSet = new HashSet<>(tableIds);
+            // a smaller set than the list means at least one duplicate identifier
+            if (tableIdSet.size() < tableList.size()) {
+                throw new IllegalArgumentException(
+                        "Please configure a unique `database`.`table` for each entry; null or duplicate values are not allowed: "
+                                + tableIds);
+            }
+        }
+
+        for (DorisTableConfig dorisTableConfig : tableList) {
+            if (StringUtils.isBlank(dorisTableConfig.getDatabase())) {
+                throw new IllegalArgumentException(
+                        "Please configure `database`; a null or blank database is not allowed.");
+            }
+            if (StringUtils.isBlank(dorisTableConfig.getTable())) {
+                throw new IllegalArgumentException(
+                        "Please configure `table`; a null or blank table is not allowed.");
+            }
+            if (dorisTableConfig.getBatchSize() <= 0) {
+                dorisTableConfig.setBatchSize(DORIS_BATCH_SIZE.defaultValue());
+            }
+            if 
(dorisTableConfig.getExecMemLimit() <= 0) { + dorisTableConfig.setExecMemLimit(DORIS_EXEC_MEM_LIMIT.defaultValue()); + } + if (dorisTableConfig.getTabletSize() <= 0) { + dorisTableConfig.setTabletSize(DORIS_TABLET_SIZE.defaultValue()); + } + } + return tableList; + } + + public String getTableIdentifier() { + return String.format("%s.%s", database, table); + } +} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/AbstractDorisTypeConverter.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/AbstractDorisTypeConverter.java index e6b9b95361d..67266b453f5 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/AbstractDorisTypeConverter.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/AbstractDorisTypeConverter.java @@ -26,12 +26,13 @@ import org.apache.seatunnel.api.table.type.DecimalType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.common.exception.CommonError; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; import lombok.extern.slf4j.Slf4j; import java.util.Locale; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.IDENTIFIER; + @Slf4j public abstract class AbstractDorisTypeConverter implements TypeConverter { public static final String DORIS_NULL = "NULL"; @@ -186,7 +187,7 @@ public void sampleTypeConverter( break; default: throw CommonError.convertToSeaTunnelTypeError( - DorisConfig.IDENTIFIER, dorisColumnType, typeDefine.getName()); + IDENTIFIER, dorisColumnType, typeDefine.getName()); } } @@ -234,7 +235,7 @@ protected void sampleReconvertString( } throw CommonError.convertToConnectorTypeError( - DorisConfig.IDENTIFIER, column.getDataType().getSqlType().name(), column.getName()); + IDENTIFIER, column.getDataType().getSqlType().name(), column.getName()); } protected BasicTypeDefine sampleReconvert( @@ -366,9 +367,7 @@ protected BasicTypeDefine sampleReconvert( break; default: throw CommonError.convertToConnectorTypeError( - DorisConfig.IDENTIFIER, - column.getDataType().getSqlType().name(), - column.getName()); + IDENTIFIER, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); } @@ -430,7 +429,7 @@ private void reconvertBuildArrayInternal( break; default: throw CommonError.convertToConnectorTypeError( - DorisConfig.IDENTIFIER, elementType.getSqlType().name(), columnName); + IDENTIFIER, elementType.getSqlType().name(), columnName); } } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV1.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV1.java index fb129249702..9b7e98368fb 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV1.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV1.java @@ -23,11 +23,12 @@ import org.apache.seatunnel.api.table.converter.TypeConverter; import org.apache.seatunnel.api.table.type.DecimalType; import org.apache.seatunnel.api.table.type.LocalTimeType; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; import com.google.auto.service.AutoService; import lombok.extern.slf4j.Slf4j; 
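A minimal sketch, using only names that appear in this patch, of the pattern the surrounding hunks apply: the connector identifier moves off DorisConfig onto the DorisOptions interface and is statically imported wherever the connector name is needed, so type converters, factories, and error messages share a single constant:

@Override
public String identifier() {
    return IDENTIFIER; // statically imported from DorisOptions; resolves to "Doris"
}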
+import static org.apache.seatunnel.connectors.doris.config.DorisOptions.IDENTIFIER; + /** Doris type converter for version 1.2.x */ @Slf4j @AutoService(TypeConverter.class) @@ -42,7 +43,7 @@ public class DorisTypeConverterV1 extends AbstractDorisTypeConverter { @Override public String identifier() { - return DorisConfig.IDENTIFIER; + return IDENTIFIER; } @Override diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV2.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV2.java index 3b5ebde0f47..46ae79251e0 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV2.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/datatype/DorisTypeConverterV2.java @@ -28,7 +28,6 @@ import org.apache.seatunnel.api.table.type.MapType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.common.exception.CommonError; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; import com.google.auto.service.AutoService; import lombok.extern.slf4j.Slf4j; @@ -37,6 +36,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.IDENTIFIER; + /** Doris type converter for version 2.x */ @Slf4j @AutoService(TypeConverter.class) @@ -62,7 +63,7 @@ public class DorisTypeConverterV2 extends AbstractDorisTypeConverter { @Override public String identifier() { - return DorisConfig.IDENTIFIER; + return IDENTIFIER; } @Override @@ -166,7 +167,7 @@ private void convertArray( DecimalArrayType decimalArray = new DecimalArrayType(new DecimalType(20, 0)); builder.dataType(decimalArray); } else { - throw CommonError.convertToSeaTunnelTypeError(DorisConfig.IDENTIFIER, columnType, name); + throw CommonError.convertToSeaTunnelTypeError(IDENTIFIER, columnType, name); } } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/rest/RestService.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/rest/RestService.java index b516157443a..97fd3ca78e9 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/rest/RestService.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/rest/RestService.java @@ -18,12 +18,13 @@ package org.apache.seatunnel.connectors.doris.rest; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; -import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSourceOptions; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.rest.models.QueryPlan; import org.apache.seatunnel.connectors.doris.rest.models.Tablet; +import org.apache.seatunnel.connectors.doris.source.DorisSourceTable; import org.apache.seatunnel.connectors.doris.util.ErrorMessages; import org.apache.commons.io.IOUtils; @@ -69,11 +70,12 @@ public class RestService implements Serializable { private static final 
String QUERY_PLAN = "_query_plan"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - private static String send(DorisConfig dorisConfig, HttpRequestBase request, Logger logger) + private static String send( + DorisSourceConfig dorisSourceConfig, HttpRequestBase request, Logger logger) throws DorisConnectorException { - int connectTimeout = dorisConfig.getRequestConnectTimeoutMs(); - int socketTimeout = dorisConfig.getRequestReadTimeoutMs(); - int retries = dorisConfig.getRequestRetries(); + int connectTimeout = dorisSourceConfig.getRequestConnectTimeoutMs(); + int socketTimeout = dorisSourceConfig.getRequestReadTimeoutMs(); + int retries = dorisSourceConfig.getRequestRetries(); logger.trace( "connect timeout set to '{}'. socket timeout set to '{}'. retries set to '{}'.", connectTimeout, @@ -90,7 +92,7 @@ private static String send(DorisConfig dorisConfig, HttpRequestBase request, Log logger.info( "Send request to Doris FE '{}' with user '{}'.", request.getURI(), - dorisConfig.getUsername()); + dorisSourceConfig.getUsername()); IOException ex = null; int statusCode = -1; @@ -102,15 +104,15 @@ private static String send(DorisConfig dorisConfig, HttpRequestBase request, Log response = getConnectionGet( request.getURI().toString(), - dorisConfig.getUsername(), - dorisConfig.getPassword(), + dorisSourceConfig.getUsername(), + dorisSourceConfig.getPassword(), logger); } else { response = getConnectionPost( request, - dorisConfig.getUsername(), - dorisConfig.getPassword(), + dorisSourceConfig.getUsername(), + dorisSourceConfig.getPassword(), logger); } if (StringUtils.isEmpty(response)) { @@ -251,11 +253,16 @@ public static String randomEndpoint(String feNodes, Logger logger) } @VisibleForTesting - static String getUriStr(DorisConfig dorisConfig, Logger logger) throws DorisConnectorException { - String tableIdentifier = dorisConfig.getDatabase() + "." + dorisConfig.getTable(); + static String getUriStr( + DorisSourceConfig dorisSourceConfig, DorisSourceTable dorisSourceTable, Logger logger) + throws DorisConnectorException { + String tableIdentifier = + dorisSourceTable.getTablePath().getDatabaseName() + + "." + + dorisSourceTable.getTablePath().getTableName(); String[] identifier = parseIdentifier(tableIdentifier, logger); return "http://" - + randomEndpoint(dorisConfig.getFrontends(), logger) + + randomEndpoint(dorisSourceConfig.getFrontends(), logger) + API_PREFIX + "/" + identifier[0] @@ -265,9 +272,13 @@ static String getUriStr(DorisConfig dorisConfig, Logger logger) throws DorisConn } public static List findPartitions( - SeaTunnelRowType rowType, DorisConfig dorisConfig, Logger logger) + DorisSourceConfig dorisSourceConfig, DorisSourceTable dorisSourceTable, Logger logger) throws DorisConnectorException { - String tableIdentifier = dorisConfig.getDatabase() + "." + dorisConfig.getTable(); + String tableIdentifier = + dorisSourceTable.getTablePath().getDatabaseName() + + "." 
+ + dorisSourceTable.getTablePath().getTableName(); + SeaTunnelRowType rowType = dorisSourceTable.getCatalogTable().getSeaTunnelRowType(); String[] tableIdentifiers = parseIdentifier(tableIdentifier, logger); String readFields = "*"; if (rowType.getFieldNames().length != 0) { @@ -281,12 +292,13 @@ public static List findPartitions( + "`.`" + tableIdentifiers[1] + "`"; - if (!StringUtils.isEmpty(dorisConfig.getFilterQuery())) { - sql += " where " + dorisConfig.getFilterQuery(); + if (!StringUtils.isEmpty(dorisSourceTable.getFilterQuery())) { + sql += " where " + dorisSourceTable.getFilterQuery(); } logger.debug("Query SQL Sending to Doris FE is: '{}'.", sql); - HttpPost httpPost = new HttpPost(getUriStr(dorisConfig, logger) + QUERY_PLAN); + HttpPost httpPost = + new HttpPost(getUriStr(dorisSourceConfig, dorisSourceTable, logger) + QUERY_PLAN); String entity = "{\"sql\": \"" + sql + "\"}"; logger.debug("Post body Sending to Doris FE is: '{}'.", entity); StringEntity stringEntity = new StringEntity(entity, StandardCharsets.UTF_8); @@ -294,12 +306,12 @@ public static List findPartitions( stringEntity.setContentType("application/json"); httpPost.setEntity(stringEntity); - String resStr = send(dorisConfig, httpPost, logger); + String resStr = send(dorisSourceConfig, httpPost, logger); logger.debug("Find partition response is '{}'.", resStr); QueryPlan queryPlan = getQueryPlan(resStr, logger); Map> be2Tablets = selectBeForTablet(queryPlan, logger); return tabletsMapToPartition( - dorisConfig, + dorisSourceTable, be2Tablets, queryPlan.getOpaqued_query_plan(), tableIdentifiers[0], @@ -397,18 +409,18 @@ static Map> selectBeForTablet(QueryPlan queryPlan, Logger log } @VisibleForTesting - static int tabletCountLimitForOnePartition(DorisConfig dorisConfig, Logger logger) { - int tabletsSize = DorisOptions.DORIS_TABLET_SIZE_DEFAULT; - if (dorisConfig.getTabletSize() != null) { - tabletsSize = dorisConfig.getTabletSize(); + static int tabletCountLimitForOnePartition(DorisSourceTable dorisSourceTable, Logger logger) { + int tabletsSize = DorisSourceOptions.DORIS_TABLET_SIZE_DEFAULT; + if (dorisSourceTable.getTabletSize() != null) { + tabletsSize = dorisSourceTable.getTabletSize(); } - if (tabletsSize < DorisOptions.DORIS_TABLET_SIZE_MIN) { + if (tabletsSize < DorisSourceOptions.DORIS_TABLET_SIZE_MIN) { logger.warn( "{} is less than {}, set to default value {}.", - DorisOptions.DORIS_TABLET_SIZE, - DorisOptions.DORIS_TABLET_SIZE_MIN, - DorisOptions.DORIS_TABLET_SIZE_MIN); - tabletsSize = DorisOptions.DORIS_TABLET_SIZE_MIN; + DorisSourceOptions.DORIS_TABLET_SIZE, + DorisSourceOptions.DORIS_TABLET_SIZE_MIN, + DorisSourceOptions.DORIS_TABLET_SIZE_MIN); + tabletsSize = DorisSourceOptions.DORIS_TABLET_SIZE_MIN; } logger.debug("Tablet size is set to {}.", tabletsSize); return tabletsSize; @@ -416,14 +428,14 @@ static int tabletCountLimitForOnePartition(DorisConfig dorisConfig, Logger logge @VisibleForTesting static List tabletsMapToPartition( - DorisConfig dorisConfig, + DorisSourceTable dorisSourceTable, Map> be2Tablets, String opaquedQueryPlan, String database, String table, Logger logger) throws DorisConnectorException { - int tabletsSize = tabletCountLimitForOnePartition(dorisConfig, logger); + int tabletsSize = tabletCountLimitForOnePartition(dorisSourceTable, logger); List partitions = new ArrayList<>(); for (Map.Entry> beInfo : be2Tablets.entrySet()) { logger.debug("Generate partition with beInfo: '{}'.", beInfo); diff --git 
a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSink.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSink.java index c0a9a2a5a17..deb88a51b11 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSink.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSink.java @@ -33,8 +33,8 @@ import org.apache.seatunnel.api.table.factory.CatalogFactory; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.common.constants.PluginType; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; -import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSinkConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSinkOptions; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.sink.committer.DorisCommitInfo; import org.apache.seatunnel.connectors.doris.sink.committer.DorisCommitInfoSerializer; @@ -55,7 +55,7 @@ public class DorisSink SupportSaveMode, SupportMultiTableSink { - private final DorisConfig dorisConfig; + private final DorisSinkConfig dorisSinkConfig; private final ReadonlyConfig config; private final CatalogTable catalogTable; private String jobId; @@ -63,7 +63,7 @@ public class DorisSink public DorisSink(ReadonlyConfig config, CatalogTable catalogTable) { this.config = config; this.catalogTable = catalogTable; - this.dorisConfig = DorisConfig.of(config); + this.dorisSinkConfig = DorisSinkConfig.of(config); } @Override @@ -79,13 +79,13 @@ public void setJobContext(JobContext jobContext) { @Override public DorisSinkWriter createWriter(SinkWriter.Context context) throws IOException { return new DorisSinkWriter( - context, Collections.emptyList(), catalogTable, dorisConfig, jobId); + context, Collections.emptyList(), catalogTable, dorisSinkConfig, jobId); } @Override public SinkWriter restoreWriter( SinkWriter.Context context, List states) throws IOException { - return new DorisSinkWriter(context, states, catalogTable, dorisConfig, jobId); + return new DorisSinkWriter(context, states, catalogTable, dorisSinkConfig, jobId); } @Override @@ -95,7 +95,7 @@ public Optional> getWriterStateSerializer() { @Override public Optional> createCommitter() throws IOException { - return Optional.of(new DorisCommitter(dorisConfig)); + return Optional.of(new DorisCommitter(dorisSinkConfig)); } @Override @@ -127,11 +127,11 @@ public Optional getSaveModeHandler() { Catalog catalog = catalogFactory.createCatalog(catalogFactory.factoryIdentifier(), config); return Optional.of( new DefaultSaveModeHandler( - config.get(DorisOptions.SCHEMA_SAVE_MODE), - config.get(DorisOptions.DATA_SAVE_MODE), + config.get(DorisSinkOptions.SCHEMA_SAVE_MODE), + config.get(DorisSinkOptions.DATA_SAVE_MODE), catalog, catalogTable, - config.get(DorisOptions.CUSTOM_SQL))); + config.get(DorisSinkOptions.CUSTOM_SQL))); } @Override diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSinkFactory.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSinkFactory.java index e1849c39341..9a2ce67be27 100644 --- 
a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSinkFactory.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/DorisSinkFactory.java @@ -26,7 +26,7 @@ import org.apache.seatunnel.api.table.factory.TableSinkFactory; import org.apache.seatunnel.api.table.factory.TableSinkFactoryContext; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSinkOptions; import org.apache.seatunnel.connectors.doris.sink.committer.DorisCommitInfo; import org.apache.seatunnel.connectors.doris.sink.writer.DorisSinkState; import org.apache.seatunnel.connectors.doris.util.UnsupportedTypeConverterUtils; @@ -39,15 +39,14 @@ import java.util.List; import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DATABASE; -import static org.apache.seatunnel.connectors.doris.config.DorisOptions.NEEDS_UNSUPPORTED_TYPE_CASTING; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.IDENTIFIER; import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE; import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE_IDENTIFIER; +import static org.apache.seatunnel.connectors.doris.config.DorisSinkOptions.NEEDS_UNSUPPORTED_TYPE_CASTING; @AutoService(Factory.class) public class DorisSinkFactory implements TableSinkFactory { - public static final String IDENTIFIER = "Doris"; - @Override public String factoryIdentifier() { return IDENTIFIER; @@ -55,12 +54,12 @@ public String factoryIdentifier() { @Override public OptionRule optionRule() { - return DorisOptions.SINK_RULE.build(); + return DorisSinkOptions.SINK_RULE.build(); } @Override public List excludeTablePlaceholderReplaceKeys() { - return Arrays.asList(DorisOptions.SAVE_MODE_CREATE_TEMPLATE.key()); + return Arrays.asList(DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE.key()); } @Override diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/committer/DorisCommitter.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/committer/DorisCommitter.java index 5c6e81ba7e2..b92f2869bc9 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/committer/DorisCommitter.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/committer/DorisCommitter.java @@ -18,7 +18,7 @@ package org.apache.seatunnel.connectors.doris.sink.committer; import org.apache.seatunnel.api.sink.SinkCommitter; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSinkConfig; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.sink.HttpPutBuilder; @@ -46,15 +46,15 @@ public class DorisCommitter implements SinkCommitter { private static final String COMMIT_PATTERN = "http://%s/api/%s/_stream_load_2pc"; private static final int HTTP_TEMPORARY_REDIRECT = 200; private final CloseableHttpClient httpClient; - private final DorisConfig dorisConfig; + private final DorisSinkConfig dorisSinkConfig; int maxRetry; - public DorisCommitter(DorisConfig dorisConfig) { - this(dorisConfig, new 
HttpUtil().getHttpClient()); + public DorisCommitter(DorisSinkConfig dorisSinkConfig) { + this(dorisSinkConfig, new HttpUtil().getHttpClient()); } - public DorisCommitter(DorisConfig dorisConfig, CloseableHttpClient client) { - this.dorisConfig = dorisConfig; + public DorisCommitter(DorisSinkConfig dorisSinkConfig, CloseableHttpClient client) { + this.dorisSinkConfig = dorisSinkConfig; this.httpClient = client; } @@ -80,11 +80,11 @@ private void commitTransaction(DorisCommitInfo committable) int retry = 0; String hostPort = committable.getHostPort(); CloseableHttpResponse response = null; - while (retry++ <= dorisConfig.getMaxRetries()) { + while (retry++ <= dorisSinkConfig.getMaxRetries()) { HttpPutBuilder putBuilder = new HttpPutBuilder(); putBuilder .setUrl(String.format(COMMIT_PATTERN, hostPort, committable.getDb())) - .baseAuth(dorisConfig.getUsername(), dorisConfig.getPassword()) + .baseAuth(dorisSinkConfig.getUsername(), dorisSinkConfig.getPassword()) .addCommonHeader() .addTxnId(committable.getTxbID()) .setEmptyEntity() @@ -93,14 +93,14 @@ private void commitTransaction(DorisCommitInfo committable) response = httpClient.execute(putBuilder.build()); } catch (IOException e) { log.error("commit transaction failed: ", e); - hostPort = dorisConfig.getFrontends(); + hostPort = dorisSinkConfig.getFrontends(); continue; } statusCode = response.getStatusLine().getStatusCode(); reasonPhrase = response.getStatusLine().getReasonPhrase(); if (statusCode != HTTP_TEMPORARY_REDIRECT) { log.warn("commit failed with {}, reason {}", hostPort, reasonPhrase); - hostPort = dorisConfig.getFrontends(); + hostPort = dorisSinkConfig.getFrontends(); } else { break; } @@ -139,7 +139,7 @@ private void abortTransaction(DorisCommitInfo committable) while (retry++ <= maxRetry) { HttpPutBuilder builder = new HttpPutBuilder(); builder.setUrl(String.format(COMMIT_PATTERN, hostPort, committable.getDb())) - .baseAuth(dorisConfig.getUsername(), dorisConfig.getPassword()) + .baseAuth(dorisSinkConfig.getUsername(), dorisSinkConfig.getPassword()) .addCommonHeader() .addTxnId(committable.getTxbID()) .setEmptyEntity() diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java index b5aa5274216..f6dfae55346 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java @@ -22,7 +22,7 @@ import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSinkConfig; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.rest.RestService; @@ -59,7 +59,7 @@ public class DorisSinkWriter new ArrayList<>(Arrays.asList(LoadStatus.SUCCESS, LoadStatus.PUBLISH_TIMEOUT)); private long lastCheckpointId; private DorisStreamLoad dorisStreamLoad; - private final DorisConfig dorisConfig; + private final DorisSinkConfig dorisSinkConfig; private final String 
labelPrefix; private final LabelGenerator labelGenerator; private final int intervalTime; @@ -72,41 +72,41 @@ public DorisSinkWriter( SinkWriter.Context context, List state, CatalogTable catalogTable, - DorisConfig dorisConfig, + DorisSinkConfig dorisSinkConfig, String jobId) { - this.dorisConfig = dorisConfig; + this.dorisSinkConfig = dorisSinkConfig; this.catalogTable = catalogTable; this.lastCheckpointId = !state.isEmpty() ? state.get(0).getCheckpointId() : 0; log.info("restore checkpointId {}", lastCheckpointId); - log.info("labelPrefix " + dorisConfig.getLabelPrefix()); + log.info("labelPrefix " + dorisSinkConfig.getLabelPrefix()); this.labelPrefix = - dorisConfig.getLabelPrefix() + dorisSinkConfig.getLabelPrefix() + "_" + catalogTable.getTablePath().getFullName().replaceAll("\\.", "_") + "_" + jobId + "_" + context.getIndexOfSubtask(); - this.labelGenerator = new LabelGenerator(labelPrefix, dorisConfig.getEnable2PC()); + this.labelGenerator = new LabelGenerator(labelPrefix, dorisSinkConfig.getEnable2PC()); this.scheduledExecutorService = new ScheduledThreadPoolExecutor( 1, new ThreadFactoryBuilder().setNameFormat("stream-load-check").build()); - this.serializer = createSerializer(dorisConfig, catalogTable.getSeaTunnelRowType()); - this.intervalTime = dorisConfig.getCheckInterval(); + this.serializer = createSerializer(dorisSinkConfig, catalogTable.getSeaTunnelRowType()); + this.intervalTime = dorisSinkConfig.getCheckInterval(); this.initializeLoad(); } private void initializeLoad() { - String backend = RestService.randomEndpoint(dorisConfig.getFrontends(), log); + String backend = RestService.randomEndpoint(dorisSinkConfig.getFrontends(), log); try { this.dorisStreamLoad = new DorisStreamLoad( backend, catalogTable.getTablePath(), - dorisConfig, + dorisSinkConfig, labelGenerator, new HttpUtil().getHttpClient()); - if (dorisConfig.getEnable2PC()) { + if (dorisSinkConfig.getEnable2PC()) { dorisStreamLoad.abortPreCommit(labelPrefix, lastCheckpointId + 1); } } catch (Exception e) { @@ -124,15 +124,15 @@ public void write(SeaTunnelRow element) throws IOException { checkLoadException(); byte[] serialize = serializer.serialize( - dorisConfig.isNeedsUnsupportedTypeCasting() + dorisSinkConfig.isNeedsUnsupportedTypeCasting() ? 
UnsupportedTypeConverterUtils.convertRow(element) : element); if (Objects.isNull(serialize)) { return; } dorisStreamLoad.writeRecord(serialize); - if (!dorisConfig.getEnable2PC() - && dorisStreamLoad.getRecordCount() >= dorisConfig.getBatchSize()) { + if (!dorisSinkConfig.getEnable2PC() + && dorisStreamLoad.getRecordCount() >= dorisSinkConfig.getBatchSize()) { flush(); startLoad(labelGenerator.generateLabel(lastCheckpointId)); } @@ -141,7 +141,7 @@ public void write(SeaTunnelRow element) throws IOException { @Override public Optional prepareCommit() throws IOException { RespContent respContent = flush(); - if (!dorisConfig.getEnable2PC() || respContent == null) { + if (!dorisSinkConfig.getEnable2PC() || respContent == null) { return Optional.empty(); } long txnId = respContent.getTxnId(); @@ -178,7 +178,7 @@ private void startLoad(String label) { @Override public void abortPrepare() { - if (dorisConfig.getEnable2PC()) { + if (dorisSinkConfig.getEnable2PC()) { try { dorisStreamLoad.abortPreCommit(labelPrefix, lastCheckpointId + 1); } catch (Exception e) { @@ -208,7 +208,7 @@ private void checkLoadException() { @Override public void close() throws IOException { - if (!dorisConfig.getEnable2PC()) { + if (!dorisSinkConfig.getEnable2PC()) { flush(); } if (scheduledExecutorService != null) { @@ -220,14 +220,14 @@ public void close() throws IOException { } private DorisSerializer createSerializer( - DorisConfig dorisConfig, SeaTunnelRowType seaTunnelRowType) { + DorisSinkConfig dorisSinkConfig, SeaTunnelRowType seaTunnelRowType) { return new SeaTunnelRowSerializer( - dorisConfig + dorisSinkConfig .getStreamLoadProps() .getProperty(LoadConstants.FORMAT_KEY) .toLowerCase(), seaTunnelRowType, - dorisConfig.getStreamLoadProps().getProperty(LoadConstants.FIELD_DELIMITER_KEY), - dorisConfig.getEnableDelete()); + dorisSinkConfig.getStreamLoadProps().getProperty(LoadConstants.FIELD_DELIMITER_KEY), + dorisSinkConfig.getEnableDelete()); } } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisStreamLoad.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisStreamLoad.java index 8ec59e81ece..1e0ee7b9c2e 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisStreamLoad.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisStreamLoad.java @@ -22,7 +22,7 @@ import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.common.utils.ExceptionUtils; import org.apache.seatunnel.common.utils.JsonUtils; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSinkConfig; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.rest.models.RespContent; @@ -89,20 +89,20 @@ public class DorisStreamLoad implements Serializable { public DorisStreamLoad( String hostPort, TablePath tablePath, - DorisConfig dorisConfig, + DorisSinkConfig dorisSinkConfig, LabelGenerator labelGenerator, CloseableHttpClient httpClient) { this.hostPort = hostPort; this.db = tablePath.getDatabaseName(); this.table = tablePath.getTableName(); - this.user = dorisConfig.getUsername(); - this.passwd = dorisConfig.getPassword(); + this.user = 
dorisSinkConfig.getUsername(); + this.passwd = dorisSinkConfig.getPassword(); this.labelGenerator = labelGenerator; this.loadUrlStr = String.format(LOAD_URL_PATTERN, hostPort, db, table); this.abortUrlStr = String.format(ABORT_URL_PATTERN, hostPort, db); - this.enable2PC = dorisConfig.getEnable2PC(); - this.streamLoadProp = dorisConfig.getStreamLoadProps(); - this.enableDelete = dorisConfig.getEnableDelete(); + this.enable2PC = dorisSinkConfig.getEnable2PC(); + this.streamLoadProp = dorisSinkConfig.getStreamLoadProps(); + this.enableDelete = dorisSinkConfig.getEnableDelete(); this.httpClient = httpClient; this.executorService = new ThreadPoolExecutor( @@ -113,7 +113,7 @@ public DorisStreamLoad( new LinkedBlockingQueue<>(), new ThreadFactoryBuilder().setNameFormat("stream-load-upload").build()); this.recordStream = - new RecordStream(dorisConfig.getBufferSize(), dorisConfig.getBufferCount()); + new RecordStream(dorisSinkConfig.getBufferSize(), dorisSinkConfig.getBufferCount()); lineDelimiter = streamLoadProp.getProperty(LINE_DELIMITER_KEY, LINE_DELIMITER_DEFAULT).getBytes(); loadBatchFirstRecord = true; diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java index c04f074021a..8b5f168a2d5 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java @@ -17,34 +17,36 @@ package org.apache.seatunnel.connectors.doris.source; -import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SourceReader; import org.apache.seatunnel.api.source.SourceSplitEnumerator; import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; import org.apache.seatunnel.connectors.doris.source.reader.DorisSourceReader; import org.apache.seatunnel.connectors.doris.source.split.DorisSourceSplit; import org.apache.seatunnel.connectors.doris.source.split.DorisSourceSplitEnumerator; import lombok.extern.slf4j.Slf4j; -import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; @Slf4j public class DorisSource implements SeaTunnelSource { private static final long serialVersionUID = 6139826339248788618L; - private final DorisConfig config; - private final CatalogTable catalogTable; + private final DorisSourceConfig config; + private final Map dorisSourceTables; - public DorisSource(ReadonlyConfig config, CatalogTable catalogTable) { - this.config = DorisConfig.of(config); - this.catalogTable = catalogTable; + public DorisSource( + DorisSourceConfig config, Map dorisSourceTables) { + this.config = config; + this.dorisSourceTables = dorisSourceTables; } @Override @@ -59,20 +61,21 @@ public Boundedness getBoundedness() { @Override public List getProducedCatalogTables() { - return Collections.singletonList(catalogTable); + return dorisSourceTables.values().stream() + .map(DorisSourceTable::getCatalogTable) + 
.collect(Collectors.toList()); } @Override public SourceReader createReader( SourceReader.Context readerContext) { - return new DorisSourceReader(readerContext, config, catalogTable.getSeaTunnelRowType()); + return new DorisSourceReader(readerContext, config, dorisSourceTables); } @Override public SourceSplitEnumerator createEnumerator( SourceSplitEnumerator.Context enumeratorContext) { - return new DorisSourceSplitEnumerator( - enumeratorContext, config, catalogTable.getSeaTunnelRowType()); + return new DorisSourceSplitEnumerator(enumeratorContext, config, dorisSourceTables); } @Override @@ -80,6 +83,6 @@ public SourceSplitEnumerator restoreEnumerat SourceSplitEnumerator.Context enumeratorContext, DorisSourceState checkpointState) { return new DorisSourceSplitEnumerator( - enumeratorContext, config, catalogTable.getSeaTunnelRowType(), checkpointState); + enumeratorContext, config, dorisSourceTables, checkpointState); } } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceFactory.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceFactory.java index 75cc266edad..506a7c97dc8 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceFactory.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceFactory.java @@ -17,7 +17,6 @@ package org.apache.seatunnel.connectors.doris.source; -import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.OptionRule; import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SourceSplit; @@ -29,8 +28,8 @@ import org.apache.seatunnel.api.table.factory.TableSourceFactoryContext; import org.apache.seatunnel.connectors.doris.catalog.DorisCatalog; import org.apache.seatunnel.connectors.doris.catalog.DorisCatalogFactory; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; -import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; +import org.apache.seatunnel.connectors.doris.config.DorisTableConfig; import org.apache.commons.lang3.StringUtils; @@ -39,62 +38,88 @@ import java.io.Serializable; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DATABASE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.DORIS_BATCH_SIZE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.FENODES; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.IDENTIFIER; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.PASSWORD; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.QUERY_PORT; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.TABLE; +import static org.apache.seatunnel.connectors.doris.config.DorisOptions.USERNAME; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_FILTER_QUERY; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.DORIS_READ_FIELD; +import static org.apache.seatunnel.connectors.doris.config.DorisSourceOptions.TABLE_LIST; + @Slf4j @AutoService(Factory.class) public class 
DorisSourceFactory implements TableSourceFactory { @Override public String factoryIdentifier() { - return DorisConfig.IDENTIFIER; + return IDENTIFIER; } @Override public OptionRule optionRule() { return OptionRule.builder() - .required( - DorisOptions.FENODES, - DorisOptions.USERNAME, - DorisOptions.PASSWORD, - DorisOptions.DATABASE, - DorisOptions.TABLE) - .optional(DorisOptions.DORIS_FILTER_QUERY) - .optional(DorisOptions.DORIS_READ_FIELD) - .optional(DorisOptions.QUERY_PORT) - .optional(DorisOptions.DORIS_BATCH_SIZE) + .required(FENODES, USERNAME, PASSWORD) + .optional(TABLE_LIST) + .optional(DATABASE) + .optional(TABLE) + .optional(DORIS_FILTER_QUERY) + .optional(DORIS_READ_FIELD) + .optional(QUERY_PORT) + .optional(DORIS_BATCH_SIZE) .build(); } @Override public TableSource createSource(TableSourceFactoryContext context) { - ReadonlyConfig options = context.getOptions(); - CatalogTable table; - DorisCatalogFactory dorisCatalogFactory = new DorisCatalogFactory(); - DorisCatalog catalog = (DorisCatalog) dorisCatalogFactory.createCatalog("doris", options); - catalog.open(); - String tableIdentifier = - options.get(DorisOptions.DATABASE) + "." + options.get(DorisOptions.TABLE); - TablePath tablePath = TablePath.of(tableIdentifier); + DorisSourceConfig dorisSourceConfig = DorisSourceConfig.of(context.getOptions()); + List dorisTableConfigList = dorisSourceConfig.getTableConfigList(); + Map dorisSourceTables = new HashMap<>(); + for (DorisTableConfig dorisTableConfig : dorisTableConfigList) { + CatalogTable table; + DorisCatalogFactory dorisCatalogFactory = new DorisCatalogFactory(); + DorisCatalog catalog = + (DorisCatalog) dorisCatalogFactory.createCatalog("doris", context.getOptions()); + catalog.open(); + TablePath tablePath = TablePath.of(dorisTableConfig.getTableIdentifier()); + String readFields = dorisTableConfig.getReadField(); + try { + List readFiledList = null; + if (StringUtils.isNotBlank(readFields)) { + readFiledList = + Arrays.stream(readFields.split(",")) + .map(String::trim) + .collect(Collectors.toList()); + } - try { - String read_fields = options.get(DorisOptions.DORIS_READ_FIELD); - List readFiledList = null; - if (StringUtils.isNotBlank(read_fields)) { - readFiledList = - Arrays.stream(read_fields.split(",")) - .map(String::trim) - .collect(Collectors.toList()); + table = catalog.getTable(tablePath, readFiledList); + } catch (Exception e) { + log.error("create source error"); + throw e; } - - table = catalog.getTable(tablePath, readFiledList); - } catch (Exception e) { - log.error("create source error"); - throw e; + dorisSourceTables.put( + tablePath, + DorisSourceTable.builder() + .catalogTable(table) + .tablePath(tablePath) + .readField(readFields) + .filterQuery(dorisTableConfig.getFilterQuery()) + .batchSize(dorisTableConfig.getBatchSize()) + .tabletSize(dorisTableConfig.getTabletSize()) + .execMemLimit(dorisTableConfig.getExecMemLimit()) + .build()); } - CatalogTable finalTable = table; - return () -> (SeaTunnelSource) new DorisSource(options, finalTable); + return () -> + (SeaTunnelSource) + new DorisSource(dorisSourceConfig, dorisSourceTables); } @Override diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceTable.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceTable.java new file mode 100644 index 00000000000..b09568db9ed --- /dev/null +++ 
b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSourceTable.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.doris.source; + +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.TablePath; + +import lombok.Builder; +import lombok.Data; + +import java.io.Serializable; + +@Data +@Builder +public class DorisSourceTable implements Serializable { + private static final long serialVersionUID = 1L; + + private final TablePath tablePath; + private String readField; + private String filterQuery; + private int batchSize; + private Integer tabletSize; + private Long execMemLimit; + private final CatalogTable catalogTable; +} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisSourceReader.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisSourceReader.java index 66c4e1f269f..ffe1d0e54a0 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisSourceReader.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisSourceReader.java @@ -20,10 +20,13 @@ import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.source.SourceReader; +import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; +import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; +import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.rest.PartitionDefinition; +import org.apache.seatunnel.connectors.doris.source.DorisSourceTable; import org.apache.seatunnel.connectors.doris.source.split.DorisSourceSplit; import lombok.extern.slf4j.Slf4j; @@ -32,27 +35,30 @@ import java.util.ArrayDeque; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Queue; @Slf4j public class DorisSourceReader implements SourceReader { private final Context context; - private final DorisConfig dorisConfig; + private final DorisSourceConfig dorisSourceConfig; private final Queue splitsQueue; private volatile boolean noMoreSplits; private DorisValueReader valueReader; - private SeaTunnelRowType seaTunnelRowType; + private final 
Map tables; public DorisSourceReader( - Context context, DorisConfig dorisConfig, SeaTunnelRowType seaTunnelRowType) { + Context context, + DorisSourceConfig dorisSourceConfig, + Map tables) { this.splitsQueue = new ArrayDeque<>(); this.context = context; - this.dorisConfig = dorisConfig; - this.seaTunnelRowType = seaTunnelRowType; + this.dorisSourceConfig = dorisSourceConfig; + this.tables = tables; } @Override @@ -71,7 +77,16 @@ public void pollNext(Collector output) throws Exception { DorisSourceSplit nextSplit = splitsQueue.poll(); if (nextSplit != null) { PartitionDefinition partition = nextSplit.getPartitionDefinition(); - valueReader = new DorisValueReader(partition, dorisConfig, seaTunnelRowType); + DorisSourceTable dorisSourceTable = + tables.get(TablePath.of(partition.getDatabase(), partition.getTable())); + if (dorisSourceTable == null) { + throw new DorisConnectorException( + DorisConnectorErrorCode.SHOULD_NEVER_HAPPEN, + String.format( + "the table '%s.%s' cannot be found in table_list of job configuration.", + partition.getDatabase(), partition.getTable())); + } + valueReader = new DorisValueReader(partition, dorisSourceConfig, dorisSourceTable); while (valueReader.hasNext()) { SeaTunnelRow record = valueReader.next(); output.collect(record); diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisValueReader.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisValueReader.java index 18d3d004d94..68d2eecfb51 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisValueReader.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/reader/DorisValueReader.java @@ -20,11 +20,12 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.connectors.doris.backend.BackendClient; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorErrorCode; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.rest.PartitionDefinition; import org.apache.seatunnel.connectors.doris.rest.models.Schema; +import org.apache.seatunnel.connectors.doris.source.DorisSourceTable; import org.apache.seatunnel.connectors.doris.source.serialization.Routing; import org.apache.seatunnel.connectors.doris.source.serialization.RowBatch; import org.apache.seatunnel.connectors.doris.util.SchemaUtils; @@ -54,7 +55,8 @@ public class DorisValueReader { protected Lock clientLock = new ReentrantLock(); private PartitionDefinition partition; - private DorisConfig config; + private DorisSourceTable dorisSourceTable; + private DorisSourceConfig config; protected int offset = 0; protected AtomicBoolean eos = new AtomicBoolean(false); @@ -72,12 +74,15 @@ public class DorisValueReader { protected boolean asyncThreadStarted; public DorisValueReader( - PartitionDefinition partition, DorisConfig config, SeaTunnelRowType seaTunnelRowType) { + PartitionDefinition partition, + DorisSourceConfig config, + DorisSourceTable dorisSourceTable) { this.partition = partition; this.config = config; + this.dorisSourceTable = dorisSourceTable; this.client = backendClient(); 
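// [Review note] DorisValueReader now takes the split's DorisSourceTable and derives the row
// type from its CatalogTable, instead of receiving a single SeaTunnelRowType; this is what
// lets one source job read several Doris tables. A minimal sketch of the call path, assuming
// a table map built by DorisSourceFactory (variable names here are illustrative only):
//
//     Map<TablePath, DorisSourceTable> tables = ...; // keyed by database.table
//     PartitionDefinition partition = split.getPartitionDefinition();
//     DorisSourceTable sourceTable =
//             tables.get(TablePath.of(partition.getDatabase(), partition.getTable()));
//     DorisValueReader reader = new DorisValueReader(partition, sourceConfig, sourceTable);
//
// This mirrors DorisSourceReader.pollNext() above, where a missing entry is reported as a
// DorisConnectorException rather than surfacing later as a NullPointerException.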
this.deserializeArrowToRowBatchAsync = config.getDeserializeArrowAsync(); - this.seaTunnelRowType = seaTunnelRowType; + this.seaTunnelRowType = dorisSourceTable.getCatalogTable().getSeaTunnelRowType(); int blockingQueueSize = config.getDeserializeQueueSize(); if (this.deserializeArrowToRowBatchAsync) { this.rowBatchBlockingQueue = new ArrayBlockingQueue<>(blockingQueueSize); @@ -117,9 +122,9 @@ private TScanOpenParams openParams() { params.tablet_ids = Arrays.asList(partition.getTabletIds().toArray(new Long[] {})); params.opaqued_query_plan = partition.getQueryPlan(); // max row number of one read batch - Integer batchSize = config.getBatchSize(); + Integer batchSize = dorisSourceTable.getBatchSize(); Integer queryDorisTimeout = config.getRequestQueryTimeoutS(); - Long execMemLimit = config.getExecMemLimit(); + Long execMemLimit = dorisSourceTable.getExecMemLimit(); params.setBatchSize(batchSize); params.setQueryTimeout(queryDorisTimeout); params.setMemLimit(execMemLimit); @@ -250,7 +255,9 @@ public SeaTunnelRow next() { throw new DorisConnectorException( DorisConnectorErrorCode.SHOULD_NEVER_HAPPEN, "never happen error."); } - return rowBatch.next(); + SeaTunnelRow next = rowBatch.next(); + next.setTableId(dorisSourceTable.getTablePath().toString()); + return next; } public void close() { diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/split/DorisSourceSplitEnumerator.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/split/DorisSourceSplitEnumerator.java index d2d2e61d7e1..1aa10a88b54 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/split/DorisSourceSplitEnumerator.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/split/DorisSourceSplitEnumerator.java @@ -18,13 +18,14 @@ package org.apache.seatunnel.connectors.doris.source.split; import org.apache.seatunnel.api.source.SourceSplitEnumerator; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; -import org.apache.seatunnel.connectors.doris.config.DorisConfig; +import org.apache.seatunnel.connectors.doris.config.DorisSourceConfig; import org.apache.seatunnel.connectors.doris.exception.DorisConnectorException; import org.apache.seatunnel.connectors.doris.rest.PartitionDefinition; import org.apache.seatunnel.connectors.doris.rest.RestService; import org.apache.seatunnel.connectors.doris.source.DorisSourceState; +import org.apache.seatunnel.connectors.doris.source.DorisSourceTable; import lombok.extern.slf4j.Slf4j; @@ -41,31 +42,31 @@ public class DorisSourceSplitEnumerator implements SourceSplitEnumerator { - private Context context; - private DorisConfig dorisConfig; + private final Context context; + private final DorisSourceConfig dorisSourceConfig; private volatile boolean shouldEnumerate; private final Map> pendingSplit; - private SeaTunnelRowType seaTunnelRowType; + private final Map dorisSourceTables; private final Object stateLock = new Object(); public DorisSourceSplitEnumerator( Context context, - DorisConfig dorisConfig, - SeaTunnelRowType seaTunnelRowType) { - this(context, dorisConfig, seaTunnelRowType, null); + DorisSourceConfig dorisSourceConfig, + Map dorisSourceTables) { + this(context, dorisSourceConfig, dorisSourceTables, null); } public 
DorisSourceSplitEnumerator( Context context, - DorisConfig dorisConfig, - SeaTunnelRowType rowType, + DorisSourceConfig dorisSourceConfig, + Map dorisSourceTables, DorisSourceState dorisSourceState) { this.context = context; - this.dorisConfig = dorisConfig; - this.seaTunnelRowType = rowType; + this.dorisSourceConfig = dorisSourceConfig; + this.dorisSourceTables = dorisSourceTables; this.pendingSplit = new ConcurrentHashMap<>(); this.shouldEnumerate = (dorisSourceState == null); if (dorisSourceState != null) { @@ -149,10 +150,12 @@ public void notifyCheckpointComplete(long checkpointId) {} private List getDorisSourceSplit() { List splits = new ArrayList<>(); - List partitions = - RestService.findPartitions(seaTunnelRowType, dorisConfig, log); - for (PartitionDefinition partition : partitions) { - splits.add(new DorisSourceSplit(partition, String.valueOf(partition.hashCode()))); + for (DorisSourceTable dorisSourceTable : dorisSourceTables.values()) { + List partitions = + RestService.findPartitions(dorisSourceConfig, dorisSourceTable, log); + for (PartitionDefinition partition : partitions) { + splits.add(new DorisSourceSplit(partition, String.valueOf(partition.hashCode()))); + } } return splits; } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java index 5025caed21c..53b38049f98 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java @@ -24,7 +24,7 @@ import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.api.table.converter.TypeConverter; -import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSinkOptions; import org.apache.seatunnel.connectors.seatunnel.common.sql.template.SqlTemplate; import org.apache.commons.lang3.StringUtils; @@ -155,7 +155,7 @@ public static String getCreateTableStatement( SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getPlaceHolder(), primaryKey, tablePath.getFullName(), - DorisOptions.SAVE_MODE_CREATE_TEMPLATE.key()); + DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE.key()); template = template.replaceAll( SaveModePlaceHolder.ROWTYPE_PRIMARY_KEY.getReplacePlaceHolder(), @@ -165,7 +165,7 @@ public static String getCreateTableStatement( SaveModePlaceHolder.ROWTYPE_UNIQUE_KEY.getPlaceHolder(), uniqueKey, tablePath.getFullName(), - DorisOptions.SAVE_MODE_CREATE_TEMPLATE.key()); + DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE.key()); template = template.replaceAll( SaveModePlaceHolder.ROWTYPE_UNIQUE_KEY.getReplacePlaceHolder(), uniqueKey); @@ -174,7 +174,7 @@ public static String getCreateTableStatement( SaveModePlaceHolder.ROWTYPE_DUPLICATE_KEY.getPlaceHolder(), dupKey, tablePath.getFullName(), - DorisOptions.SAVE_MODE_CREATE_TEMPLATE.key()); + DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE.key()); template = template.replaceAll( SaveModePlaceHolder.ROWTYPE_DUPLICATE_KEY.getReplacePlaceHolder(), dupKey); @@ -252,9 +252,12 @@ private static String columnToDorisType( Column column, TypeConverter typeConverter) { checkNotNull(column, "The column is required."); return String.format( - "`%s` %s %s ", + "`%s` %s %s %s", 
column.getName(), typeConverter.reconvert(column).getColumnType(), - column.isNullable() ? "NULL" : "NOT NULL"); + column.isNullable() ? "NULL" : "NOT NULL", + StringUtils.isEmpty(column.getComment()) + ? "" + : "COMMENT '" + column.getComment() + "'"); } } diff --git a/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java b/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java index 09a5b6a3293..cdaa55487c6 100644 --- a/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java +++ b/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java @@ -31,7 +31,7 @@ import org.apache.seatunnel.api.table.type.LocalTimeType; import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException; -import org.apache.seatunnel.connectors.doris.config.DorisOptions; +import org.apache.seatunnel.connectors.doris.config.DorisSinkOptions; import org.apache.seatunnel.connectors.doris.datatype.DorisTypeConverterV1; import org.apache.seatunnel.connectors.doris.util.DorisCatalogUtil; @@ -57,7 +57,9 @@ public void test() { columns.add(PhysicalColumn.of("id", BasicType.LONG_TYPE, (Long) null, true, null, "")); columns.add(PhysicalColumn.of("name", BasicType.STRING_TYPE, (Long) null, true, null, "")); - columns.add(PhysicalColumn.of("age", BasicType.INT_TYPE, (Long) null, true, null, "")); + columns.add( + PhysicalColumn.of( + "age", BasicType.INT_TYPE, (Long) null, true, null, "test comment")); columns.add(PhysicalColumn.of("score", BasicType.INT_TYPE, (Long) null, true, null, "")); columns.add(PhysicalColumn.of("gender", BasicType.BYTE_TYPE, (Long) null, true, null, "")); columns.add( @@ -122,7 +124,7 @@ public void test() { Assertions.assertEquals( result, "CREATE TABLE IF NOT EXISTS `test1`.`test2` ( \n" - + "`id` BIGINT NULL ,`age` INT NULL , \n" + + "`id` BIGINT NULL ,`age` INT NULL COMMENT 'test comment' , \n" + "`name` STRING NULL ,`score` INT NULL , \n" + "`create_time` DATETIME NOT NULL , \n" + "`gender` TINYINT NULL \n" @@ -139,7 +141,7 @@ public void test() { + "\"disable_auto_compaction\" = \"false\"\n" + ")"); - String createTemplate = DorisOptions.SAVE_MODE_CREATE_TEMPLATE.defaultValue(); + String createTemplate = DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE.defaultValue(); CatalogTable catalogTable = CatalogTable.of( TableIdentifier.of("test", "test1", "test2"), @@ -169,7 +171,7 @@ public void test() { SaveModePlaceHolder.getDisplay(primaryKeyHolder), createTemplate, primaryKeyHolder, - DorisOptions.SAVE_MODE_CREATE_TEMPLATE.key()); + DorisSinkOptions.SAVE_MODE_CREATE_TEMPLATE.key()); Assertions.assertEquals( exceptSeaTunnelRuntimeException.getMessage(), actualSeaTunnelRuntimeException.getMessage()); diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base-hadoop/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hdfs/source/BaseHdfsFileSource.java b/seatunnel-connectors-v2/connector-file/connector-file-base-hadoop/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hdfs/source/BaseHdfsFileSource.java index 9af2721e220..9cf7cace0ba 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base-hadoop/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hdfs/source/BaseHdfsFileSource.java +++ 
b/seatunnel-connectors-v2/connector-file/connector-file-base-hadoop/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hdfs/source/BaseHdfsFileSource.java @@ -21,9 +21,9 @@ import org.apache.seatunnel.api.common.PrepareFailException; import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.config.CheckConfigUtil; import org.apache.seatunnel.common.config.CheckResult; import org.apache.seatunnel.common.constants.PluginType; @@ -74,6 +74,10 @@ public void prepare(Config pluginConfig) throws PrepareFailException { pluginConfig.getString(HdfsSourceConfigOptions.REMOTE_USER.key())); } + if (pluginConfig.hasPath(HdfsSourceConfigOptions.KRB5_PATH.key())) { + hadoopConf.setKrb5Path(pluginConfig.getString(HdfsSourceConfigOptions.KRB5_PATH.key())); + } + if (pluginConfig.hasPath(HdfsSourceConfigOptions.KERBEROS_PRINCIPAL.key())) { hadoopConf.setKerberosPrincipal( pluginConfig.getString(HdfsSourceConfigOptions.KERBEROS_PRINCIPAL.key())); @@ -109,9 +113,9 @@ public void prepare(Config pluginConfig) throws PrepareFailException { case JSON: case EXCEL: case XML: - SeaTunnelRowType userDefinedSchema = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - readStrategy.setSeaTunnelRowTypeInfo(userDefinedSchema); + CatalogTable userDefinedCatalogTable = + CatalogTableUtil.buildWithConfig(pluginConfig); + readStrategy.setCatalogTable(userDefinedCatalogTable); rowType = readStrategy.getActualSeaTunnelRowTypeInfo(); break; case ORC: diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java index 373ada564a8..10b969b0086 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java @@ -95,7 +95,7 @@ private CatalogTable parseCatalogTable(ReadonlyConfig readonlyConfig) { case JSON: case EXCEL: case XML: - readStrategy.setSeaTunnelRowTypeInfo(catalogTable.getSeaTunnelRowType()); + readStrategy.setCatalogTable(catalogTable); return newCatalogTable(catalogTable, readStrategy.getActualSeaTunnelRowTypeInfo()); case ORC: case PARQUET: diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hadoop/HadoopFileSystemProxy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hadoop/HadoopFileSystemProxy.java index d4f1791a4e8..8f8aef05481 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hadoop/HadoopFileSystemProxy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/hadoop/HadoopFileSystemProxy.java @@ -64,6 +64,10 @@ public boolean fileExist(@NonNull String filePath) throws 
IOException { return execute(() -> getFileSystem().exists(new Path(filePath))); } + public boolean isFile(@NonNull String filePath) throws IOException { + return execute(() -> getFileSystem().getFileStatus(new Path(filePath)).isFile()); + } + public void createFile(@NonNull String filePath) throws IOException { execute( () -> { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseFileSink.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseFileSink.java index 6686da98806..af6003c79ce 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseFileSink.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseFileSink.java @@ -26,6 +26,8 @@ import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SinkAggregatedCommitter; import org.apache.seatunnel.api.sink.SinkWriter; +import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; +import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.connectors.seatunnel.file.config.HadoopConf; @@ -110,7 +112,9 @@ public void prepare(Config pluginConfig) throws PrepareFailException { protected WriteStrategy createWriteStrategy() { WriteStrategy writeStrategy = WriteStrategyFactory.of(fileSinkConfig.getFileFormat(), fileSinkConfig); - writeStrategy.setSeaTunnelRowTypeInfo(seaTunnelRowType); + writeStrategy.setCatalogTable( + CatalogTableUtil.getCatalogTable( + "file", null, null, TablePath.DEFAULT.getTableName(), seaTunnelRowType)); return writeStrategy; } } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseMultipleTableFileSink.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseMultipleTableFileSink.java index a48368be448..b35c113f8da 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseMultipleTableFileSink.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/BaseMultipleTableFileSink.java @@ -112,7 +112,7 @@ public Optional> getWriterStateSerializer() { protected WriteStrategy createWriteStrategy() { WriteStrategy writeStrategy = WriteStrategyFactory.of(fileSinkConfig.getFileFormat(), fileSinkConfig); - writeStrategy.setSeaTunnelRowTypeInfo(catalogTable.getSeaTunnelRowType()); + writeStrategy.setCatalogTable(catalogTable); return writeStrategy; } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/AbstractWriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/AbstractWriteStrategy.java index 68476488a55..dd49c7f2d0c 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/AbstractWriteStrategy.java +++ 
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/AbstractWriteStrategy.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.connectors.seatunnel.file.sink.writer; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; @@ -150,11 +151,11 @@ public Configuration getConfiguration(HadoopConf hadoopConf) { /** * set seaTunnelRowTypeInfo in writer * - * @param seaTunnelRowType seaTunnelRowType + * @param catalogTable seaTunnelRowType */ @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - this.seaTunnelRowType = seaTunnelRowType; + public void setCatalogTable(CatalogTable catalogTable) { + this.seaTunnelRowType = catalogTable.getSeaTunnelRowType(); } /** diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/BinaryWriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/BinaryWriteStrategy.java index 7f496b2927d..06d05d62505 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/BinaryWriteStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/BinaryWriteStrategy.java @@ -17,8 +17,8 @@ package org.apache.seatunnel.connectors.seatunnel.file.sink.writer; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorErrorCode; @@ -46,9 +46,9 @@ public BinaryWriteStrategy(FileSinkConfig fileSinkConfig) { } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - super.setSeaTunnelRowTypeInfo(seaTunnelRowType); - if (!seaTunnelRowType.equals(BinaryReadStrategy.binaryRowType)) { + public void setCatalogTable(CatalogTable catalogTable) { + super.setCatalogTable(catalogTable); + if (!catalogTable.getSeaTunnelRowType().equals(BinaryReadStrategy.binaryRowType)) { throw new FileConnectorException( FileConnectorErrorCode.FORMAT_NOT_SUPPORT, "BinaryWriteStrategy only supports binary format, please read file with `BINARY` format, and do not change schema in the transform."); diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java index f95973f4cfc..23fb7893a8f 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java @@ -18,8 +18,8 @@ package 
org.apache.seatunnel.connectors.seatunnel.file.sink.writer; import org.apache.seatunnel.api.serialization.SerializationSchema; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.common.utils.EncodingUtils; @@ -55,11 +55,13 @@ public JsonWriteStrategy(FileSinkConfig textFileSinkConfig) { } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - super.setSeaTunnelRowTypeInfo(seaTunnelRowType); + public void setCatalogTable(CatalogTable catalogTable) { + super.setCatalogTable(catalogTable); this.serializationSchema = new JsonSerializationSchema( - buildSchemaWithRowType(seaTunnelRowType, sinkColumnsIndexInRow), charset); + buildSchemaWithRowType( + catalogTable.getSeaTunnelRowType(), sinkColumnsIndexInRow), + charset); } @Override diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java index 621048fb39a..77e2eb5c5b0 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java @@ -18,8 +18,8 @@ package org.apache.seatunnel.connectors.seatunnel.file.sink.writer; import org.apache.seatunnel.api.serialization.SerializationSchema; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.common.utils.DateTimeUtils; @@ -71,12 +71,13 @@ public TextWriteStrategy(FileSinkConfig fileSinkConfig) { } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - super.setSeaTunnelRowTypeInfo(seaTunnelRowType); + public void setCatalogTable(CatalogTable catalogTable) { + super.setCatalogTable(catalogTable); this.serializationSchema = TextSerializationSchema.builder() .seaTunnelRowType( - buildSchemaWithRowType(seaTunnelRowType, sinkColumnsIndexInRow)) + buildSchemaWithRowType( + catalogTable.getSeaTunnelRowType(), sinkColumnsIndexInRow)) .delimiter(fieldDelimiter) .dateFormatter(dateFormat) .dateTimeFormatter(dateTimeFormat) diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/WriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/WriteStrategy.java index 6a1b1840b4d..24b23c9bfc3 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/WriteStrategy.java +++ 
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/WriteStrategy.java @@ -17,8 +17,8 @@ package org.apache.seatunnel.connectors.seatunnel.file.sink.writer; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.connectors.seatunnel.file.config.HadoopConf; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorException; import org.apache.seatunnel.connectors.seatunnel.file.hadoop.HadoopFileSystemProxy; @@ -56,11 +56,11 @@ public interface WriteStrategy extends Transaction, Serializable, Closeable { void write(SeaTunnelRow seaTunnelRow) throws FileConnectorException; /** - * set seaTunnelRowTypeInfo in writer + * set catalog table to write strategy * - * @param seaTunnelRowType seaTunnelRowType + * @param catalogTable catalogTable */ - void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType); + void setCatalogTable(CatalogTable catalogTable); /** * use seaTunnelRow generate partition directory diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/AbstractReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/AbstractReadStrategy.java index 3e71a3b2932..00d90d84195 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/AbstractReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/AbstractReadStrategy.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.shade.com.typesafe.config.Config; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.BasicType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; @@ -92,10 +93,10 @@ public void init(HadoopConf conf) { } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - this.seaTunnelRowType = seaTunnelRowType; + public void setCatalogTable(CatalogTable catalogTable) { + this.seaTunnelRowType = catalogTable.getSeaTunnelRowType(); this.seaTunnelRowTypeWithPartition = - mergePartitionTypes(fileNames.get(0), seaTunnelRowType); + mergePartitionTypes(fileNames.get(0), catalogTable.getSeaTunnelRowType()); } boolean checkFileType(String path) { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/BinaryReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/BinaryReadStrategy.java index 3bbb90c774b..7849415b32d 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/BinaryReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/BinaryReadStrategy.java @@ -55,7 +55,7 @@ public void read(String path, String tableId, Collector output) throws 
IOException, FileConnectorException { try (InputStream inputStream = hadoopFileSystemProxy.getInputStream(path)) { String relativePath; - if (basePath.isFile()) { + if (hadoopFileSystemProxy.isFile(basePath.getAbsolutePath())) { relativePath = basePath.getName(); } else { relativePath = diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ExcelReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ExcelReadStrategy.java index c90b6d6659b..d7dfe206ab5 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ExcelReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ExcelReadStrategy.java @@ -21,6 +21,7 @@ import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; @@ -145,15 +146,15 @@ protected void readProcess( } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - if (isNullOrEmpty(seaTunnelRowType.getFieldNames()) - || isNullOrEmpty(seaTunnelRowType.getFieldTypes())) { + public void setCatalogTable(CatalogTable catalogTable) { + SeaTunnelRowType rowType = catalogTable.getSeaTunnelRowType(); + if (isNullOrEmpty(rowType.getFieldNames()) || isNullOrEmpty(rowType.getFieldTypes())) { throw new FileConnectorException( CommonErrorCodeDeprecated.UNSUPPORTED_OPERATION, "Schema information is not set or incorrect Schema settings"); } SeaTunnelRowType userDefinedRowTypeWithPartition = - mergePartitionTypes(fileNames.get(0), seaTunnelRowType); + mergePartitionTypes(fileNames.get(0), rowType); // column projection if (pluginConfig.hasPath(BaseSourceConfigOptions.READ_COLUMNS.key())) { // get the read column index from user-defined row type @@ -161,15 +162,15 @@ public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { String[] fields = new String[readColumns.size()]; SeaTunnelDataType[] types = new SeaTunnelDataType[readColumns.size()]; for (int i = 0; i < indexes.length; i++) { - indexes[i] = seaTunnelRowType.indexOf(readColumns.get(i)); - fields[i] = seaTunnelRowType.getFieldName(indexes[i]); - types[i] = seaTunnelRowType.getFieldType(indexes[i]); + indexes[i] = rowType.indexOf(readColumns.get(i)); + fields[i] = rowType.getFieldName(indexes[i]); + types[i] = rowType.getFieldType(indexes[i]); } this.seaTunnelRowType = new SeaTunnelRowType(fields, types); this.seaTunnelRowTypeWithPartition = mergePartitionTypes(fileNames.get(0), this.seaTunnelRowType); } else { - this.seaTunnelRowType = seaTunnelRowType; + this.seaTunnelRowType = rowType; this.seaTunnelRowTypeWithPartition = userDefinedRowTypeWithPartition; } } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java index 982419266f5..dfd57363d9d 100644 --- 
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.serialization.DeserializationSchema; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; @@ -62,8 +63,8 @@ public void init(HadoopConf conf) { } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - super.setSeaTunnelRowTypeInfo(seaTunnelRowType); + public void setCatalogTable(CatalogTable catalogTable) { + super.setCatalogTable(catalogTable); if (isMergePartition) { deserializationSchema = new JsonDeserializationSchema(false, false, this.seaTunnelRowTypeWithPartition); diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java index c5bdf281244..9389223814a 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.shade.com.typesafe.config.Config; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; @@ -56,8 +57,7 @@ default SeaTunnelRowType getSeaTunnelRowTypeInfoWithUserConfigRowType( return getSeaTunnelRowTypeInfo(path); } - // todo: use CatalogTable - void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType); + void setCatalogTable(CatalogTable catalogTable); List getFileNamesByPath(String path) throws IOException; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java index 2b722593770..1a7a7398a4f 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java @@ -21,6 +21,7 @@ import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.serialization.DeserializationSchema; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import 
org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; @@ -170,9 +171,10 @@ public SeaTunnelRowType getSeaTunnelRowTypeInfo(String path) { } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { + public void setCatalogTable(CatalogTable catalogTable) { + SeaTunnelRowType rowType = catalogTable.getSeaTunnelRowType(); SeaTunnelRowType userDefinedRowTypeWithPartition = - mergePartitionTypes(fileNames.get(0), seaTunnelRowType); + mergePartitionTypes(fileNames.get(0), rowType); Optional fieldDelimiterOptional = ReadonlyConfig.fromConfig(pluginConfig) .getOptional(BaseSourceConfigOptions.FIELD_DELIMITER); @@ -201,7 +203,7 @@ public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { deserializationSchema = builder.seaTunnelRowType(userDefinedRowTypeWithPartition).build(); } else { - deserializationSchema = builder.seaTunnelRowType(seaTunnelRowType).build(); + deserializationSchema = builder.seaTunnelRowType(rowType).build(); } // column projection if (pluginConfig.hasPath(BaseSourceConfigOptions.READ_COLUMNS.key())) { @@ -210,15 +212,15 @@ public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { String[] fields = new String[readColumns.size()]; SeaTunnelDataType[] types = new SeaTunnelDataType[readColumns.size()]; for (int i = 0; i < indexes.length; i++) { - indexes[i] = seaTunnelRowType.indexOf(readColumns.get(i)); - fields[i] = seaTunnelRowType.getFieldName(indexes[i]); - types[i] = seaTunnelRowType.getFieldType(indexes[i]); + indexes[i] = rowType.indexOf(readColumns.get(i)); + fields[i] = rowType.getFieldName(indexes[i]); + types[i] = rowType.getFieldType(indexes[i]); } this.seaTunnelRowType = new SeaTunnelRowType(fields, types); this.seaTunnelRowTypeWithPartition = mergePartitionTypes(fileNames.get(0), this.seaTunnelRowType); } else { - this.seaTunnelRowType = seaTunnelRowType; + this.seaTunnelRowType = rowType; this.seaTunnelRowTypeWithPartition = userDefinedRowTypeWithPartition; } } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java index a553a4f9d06..e012c46bdf5 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java @@ -23,6 +23,7 @@ import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; @@ -173,20 +174,20 @@ public SeaTunnelRowType getSeaTunnelRowTypeInfo(String path) throws FileConnecto } @Override - public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { - if (ArrayUtils.isEmpty(seaTunnelRowType.getFieldNames()) - || ArrayUtils.isEmpty(seaTunnelRowType.getFieldTypes())) { + public void 
setCatalogTable(CatalogTable catalogTable) { + SeaTunnelRowType rowType = catalogTable.getSeaTunnelRowType(); + if (ArrayUtils.isEmpty(rowType.getFieldNames()) + || ArrayUtils.isEmpty(rowType.getFieldTypes())) { throw new FileConnectorException( CommonErrorCodeDeprecated.ILLEGAL_ARGUMENT, "Schema information is undefined or misconfigured, please check your configuration file."); } if (readColumns.isEmpty()) { - this.seaTunnelRowType = seaTunnelRowType; - this.seaTunnelRowTypeWithPartition = - mergePartitionTypes(fileNames.get(0), seaTunnelRowType); + this.seaTunnelRowType = rowType; + this.seaTunnelRowTypeWithPartition = mergePartitionTypes(fileNames.get(0), rowType); } else { - if (readColumns.retainAll(Arrays.asList(seaTunnelRowType.getFieldNames()))) { + if (readColumns.retainAll(Arrays.asList(rowType.getFieldNames()))) { log.warn( "The read columns configuration will be filtered by the schema configuration, this may cause the actual results to be inconsistent with expectations. This is due to read columns not being a subset of the schema, " + "maybe you should check the schema and read_columns!"); @@ -195,9 +196,9 @@ public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { String[] fields = new String[readColumns.size()]; SeaTunnelDataType[] types = new SeaTunnelDataType[readColumns.size()]; for (int i = 0; i < readColumns.size(); i++) { - indexes[i] = seaTunnelRowType.indexOf(readColumns.get(i)); - fields[i] = seaTunnelRowType.getFieldName(indexes[i]); - types[i] = seaTunnelRowType.getFieldType(indexes[i]); + indexes[i] = rowType.indexOf(readColumns.get(i)); + fields[i] = rowType.getFieldName(indexes[i]); + types[i] = rowType.getFieldType(indexes[i]); } this.seaTunnelRowType = new SeaTunnelRowType(fields, types); this.seaTunnelRowTypeWithPartition = diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ExcelReadStrategyTest.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ExcelReadStrategyTest.java index 8aa43a03bdc..149ee7648d5 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ExcelReadStrategyTest.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ExcelReadStrategyTest.java @@ -21,9 +21,9 @@ import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.utils.DateTimeUtils; import org.apache.seatunnel.common.utils.DateUtils; import org.apache.seatunnel.common.utils.TimeUtils; @@ -72,9 +72,8 @@ private void testExcelRead(String filePath) throws IOException, URISyntaxExcepti excelReadStrategy.init(localConf); List fileNamesByPath = excelReadStrategy.getFileNamesByPath(excelFilePath); - SeaTunnelRowType userDefinedSchema = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - excelReadStrategy.setSeaTunnelRowTypeInfo(userDefinedSchema); + CatalogTable userDefinedCatalogTable = CatalogTableUtil.buildWithConfig(pluginConfig); + 
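The projection loop above (the same shape appears in the Excel, text, and XML strategies) rebuilds a narrower row type from read_columns. A worked example of what it produces for a three-field schema:

    // Full schema: [id BIGINT, name STRING, age INT]; read_columns = ["name", "id"]
    // The loop resolves each requested column against the full row type:
    //     indexes = [1, 0], fields = ["name", "id"], types = [STRING, BIGINT]
    // so the projected row type follows the order of read_columns, not the schema order:
    SeaTunnelRowType projected =
            new SeaTunnelRowType(
                    new String[] {"name", "id"},
                    new SeaTunnelDataType<?>[] {BasicType.STRING_TYPE, BasicType.LONG_TYPE});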
excelReadStrategy.setCatalogTable(userDefinedCatalogTable); TestCollector testCollector = new TestCollector(); excelReadStrategy.read(fileNamesByPath.get(0), "", testCollector); for (SeaTunnelRow seaTunnelRow : testCollector.getRows()) { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ParquetWriteStrategyTest.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ParquetWriteStrategyTest.java index 236d6f5a037..e692d7294b7 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ParquetWriteStrategyTest.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ParquetWriteStrategyTest.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.type.BasicType; import org.apache.seatunnel.api.table.type.LocalTimeType; import org.apache.seatunnel.api.table.type.PrimitiveByteArrayType; @@ -82,7 +83,8 @@ public void testParquetWriteInt96() throws Exception { ParquetWriteStrategy writeStrategy = new ParquetWriteStrategy(writeSinkConfig); ParquetReadStrategyTest.LocalConf hadoopConf = new ParquetReadStrategyTest.LocalConf(FS_DEFAULT_NAME_DEFAULT); - writeStrategy.setSeaTunnelRowTypeInfo(writeRowType); + writeStrategy.setCatalogTable( + CatalogTableUtil.getCatalogTable("test", null, null, "test", writeRowType)); writeStrategy.init(hadoopConf, "test1", "test1", 0); writeStrategy.beginTransaction(1L); writeStrategy.write( diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java index 736ae590963..ad23dd0186f 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java @@ -21,9 +21,9 @@ import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.connectors.seatunnel.file.config.HadoopConf; import org.apache.seatunnel.connectors.seatunnel.file.source.reader.AbstractReadStrategy; import org.apache.seatunnel.connectors.seatunnel.file.source.reader.JsonReadStrategy; @@ -121,11 +121,10 @@ private static void testRead( readStrategy.init(localConf); readStrategy.getFileNamesByPath(sourceFilePath); testCollector = new TestCollector(); - SeaTunnelRowType seaTunnelRowTypeInfo = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - Assertions.assertNotNull(seaTunnelRowTypeInfo); - 
readStrategy.setSeaTunnelRowTypeInfo(seaTunnelRowTypeInfo); - log.info(seaTunnelRowTypeInfo.toString()); + CatalogTable catalogTable = CatalogTableUtil.buildWithConfig(pluginConfig); + Assertions.assertNotNull(catalogTable.getSeaTunnelRowType()); + readStrategy.setCatalogTable(catalogTable); + log.info(catalogTable.getSeaTunnelRowType().toString()); readStrategy.read(sourceFilePath, "", testCollector); assertRows(testCollector); } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/XmlReadStrategyTest.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/XmlReadStrategyTest.java index 8bb2e483896..fca8f68fd2c 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/XmlReadStrategyTest.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/XmlReadStrategyTest.java @@ -21,9 +21,9 @@ import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.utils.DateTimeUtils; import org.apache.seatunnel.common.utils.DateUtils; import org.apache.seatunnel.common.utils.TimeUtils; @@ -66,9 +66,8 @@ public void testXmlRead() throws IOException, URISyntaxException { xmlReadStrategy.setPluginConfig(pluginConfig); xmlReadStrategy.init(localConf); List fileNamesByPath = xmlReadStrategy.getFileNamesByPath(xmlFilePath); - SeaTunnelRowType userDefinedSchema = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - xmlReadStrategy.setSeaTunnelRowTypeInfo(userDefinedSchema); + CatalogTable catalogTable = CatalogTableUtil.buildWithConfig(pluginConfig); + xmlReadStrategy.setCatalogTable(catalogTable); TestCollector testCollector = new TestCollector(); xmlReadStrategy.read(fileNamesByPath.get(0), "", testCollector); for (SeaTunnelRow seaTunnelRow : testCollector.getRows()) { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-cos/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/cos/source/CosFileSource.java b/seatunnel-connectors-v2/connector-file/connector-file-cos/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/cos/source/CosFileSource.java index 0690b2acebb..bd8df0261cb 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-cos/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/cos/source/CosFileSource.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-cos/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/cos/source/CosFileSource.java @@ -22,9 +22,9 @@ import org.apache.seatunnel.api.common.PrepareFailException; import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.source.SeaTunnelSource; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.config.CheckConfigUtil; import 
org.apache.seatunnel.common.config.CheckResult; import org.apache.seatunnel.common.constants.PluginType; @@ -95,9 +95,9 @@ public void prepare(Config pluginConfig) throws PrepareFailException { case JSON: case EXCEL: case XML: - SeaTunnelRowType userDefinedSchema = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - readStrategy.setSeaTunnelRowTypeInfo(userDefinedSchema); + CatalogTable userDefinedCatalogTable = + CatalogTableUtil.buildWithConfig(pluginConfig); + readStrategy.setCatalogTable(userDefinedCatalogTable); rowType = readStrategy.getActualSeaTunnelRowTypeInfo(); break; case ORC: diff --git a/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/config/FtpConfigOptions.java b/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/config/FtpConfigOptions.java index 1f00a56abfd..645225b9eac 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/config/FtpConfigOptions.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/config/FtpConfigOptions.java @@ -22,7 +22,7 @@ import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSourceConfigOptions; import org.apache.seatunnel.connectors.seatunnel.file.ftp.system.FtpConnectionMode; -import static org.apache.seatunnel.connectors.seatunnel.file.ftp.system.FtpConnectionMode.ACTIVE_LOCAL_DATA_CONNECTION_MODE; +import static org.apache.seatunnel.connectors.seatunnel.file.ftp.system.FtpConnectionMode.ACTIVE_LOCAL; public class FtpConfigOptions extends BaseSourceConfigOptions { public static final Option FTP_PASSWORD = @@ -42,6 +42,6 @@ public class FtpConfigOptions extends BaseSourceConfigOptions { public static final Option FTP_CONNECTION_MODE = Options.key("connection_mode") .enumType(FtpConnectionMode.class) - .defaultValue(ACTIVE_LOCAL_DATA_CONNECTION_MODE) + .defaultValue(ACTIVE_LOCAL) .withDescription("FTP server connection mode "); } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/FtpConnectionMode.java b/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/FtpConnectionMode.java index 068aa5974c1..44f2264fb2c 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/FtpConnectionMode.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/FtpConnectionMode.java @@ -21,10 +21,10 @@ public enum FtpConnectionMode { /** ACTIVE_LOCAL_DATA_CONNECTION_MODE */ - ACTIVE_LOCAL_DATA_CONNECTION_MODE("active_local"), + ACTIVE_LOCAL("active_local"), /** PASSIVE_LOCAL_DATA_CONNECTION_MODE */ - PASSIVE_LOCAL_DATA_CONNECTION_MODE("passive_local"); + PASSIVE_LOCAL("passive_local"); private final String mode; @@ -38,7 +38,7 @@ public String getMode() { public static FtpConnectionMode fromMode(String mode) { for (FtpConnectionMode ftpConnectionModeEnum : FtpConnectionMode.values()) { - if (ftpConnectionModeEnum.getMode().equals(mode)) { + if (ftpConnectionModeEnum.getMode().equals(mode.toLowerCase())) { return ftpConnectionModeEnum; } } diff --git 
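With the enum rename plus the added toLowerCase(), mode lookup becomes case-insensitive over the short mode strings — a small usage sketch (what fromMode does when nothing matches lies outside this hunk, so it is not shown):

    FtpConnectionMode a = FtpConnectionMode.fromMode("passive_local"); // PASSIVE_LOCAL
    FtpConnectionMode b = FtpConnectionMode.fromMode("PASSIVE_LOCAL"); // PASSIVE_LOCAL too
    FtpConnectionMode c = FtpConnectionMode.fromMode("Active_Local");  // ACTIVE_LOCAL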
a/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/SeaTunnelFTPFileSystem.java b/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/SeaTunnelFTPFileSystem.java index 04ba218e455..029890918da 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/SeaTunnelFTPFileSystem.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-ftp/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/ftp/system/SeaTunnelFTPFileSystem.java @@ -40,6 +40,8 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.Progressable; +import lombok.extern.slf4j.Slf4j; + import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; @@ -52,6 +54,7 @@ */ @InterfaceAudience.Public @InterfaceStability.Stable +@Slf4j public class SeaTunnelFTPFileSystem extends FileSystem { public static final Log LOG = LogFactory.getLog(SeaTunnelFTPFileSystem.class); @@ -156,10 +159,7 @@ private FTPClient connect() throws IOException { } setFsFtpConnectionMode( - client, - conf.get( - FS_FTP_CONNECTION_MODE, - FtpConnectionMode.ACTIVE_LOCAL_DATA_CONNECTION_MODE.getMode())); + client, conf.get(FS_FTP_CONNECTION_MODE, FtpConnectionMode.ACTIVE_LOCAL.getMode())); return client; } @@ -172,13 +172,18 @@ private FTPClient connect() throws IOException { */ private void setFsFtpConnectionMode(FTPClient client, String mode) { switch (FtpConnectionMode.fromMode(mode)) { - case ACTIVE_LOCAL_DATA_CONNECTION_MODE: - client.enterLocalActiveMode(); - break; - case PASSIVE_LOCAL_DATA_CONNECTION_MODE: + case PASSIVE_LOCAL: client.enterLocalPassiveMode(); break; + case ACTIVE_LOCAL: + client.enterLocalActiveMode(); + break; default: + log.warn( + "Unsupported FTP connection mode: {}. Using default FTP connection mode: {}", + mode, + FtpConnectionMode.ACTIVE_LOCAL.getMode()); + client.enterLocalActiveMode(); break; } } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java index 335e3967808..ed9807729f1 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java @@ -22,9 +22,9 @@ import org.apache.seatunnel.api.common.PrepareFailException; import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.source.SeaTunnelSource; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.config.CheckConfigUtil; import org.apache.seatunnel.common.config.CheckResult; import org.apache.seatunnel.common.constants.PluginType; @@ -96,9 +96,9 @@ public void prepare(Config pluginConfig) throws PrepareFailException { case JSON: case EXCEL: case XML: -
SeaTunnelRowType userDefinedSchema = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - readStrategy.setSeaTunnelRowTypeInfo(userDefinedSchema); + CatalogTable userDefinedCatalogTable = + CatalogTableUtil.buildWithConfig(pluginConfig); + readStrategy.setCatalogTable(userDefinedCatalogTable); rowType = readStrategy.getActualSeaTunnelRowTypeInfo(); break; case ORC: diff --git a/seatunnel-connectors-v2/connector-file/connector-file-obs/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/obs/source/ObsFileSource.java b/seatunnel-connectors-v2/connector-file/connector-file-obs/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/obs/source/ObsFileSource.java index cf3061a44a3..8d2ae3d90bb 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-obs/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/obs/source/ObsFileSource.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-obs/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/obs/source/ObsFileSource.java @@ -22,9 +22,9 @@ import org.apache.seatunnel.api.common.PrepareFailException; import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.source.SeaTunnelSource; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.config.CheckConfigUtil; import org.apache.seatunnel.common.config.CheckResult; import org.apache.seatunnel.common.constants.PluginType; @@ -91,9 +91,9 @@ public void prepare(Config pluginConfig) throws PrepareFailException { case TEXT: case JSON: case EXCEL: - SeaTunnelRowType userDefinedSchema = - CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - readStrategy.setSeaTunnelRowTypeInfo(userDefinedSchema); + CatalogTable userDefinedCatalogTable = + CatalogTableUtil.buildWithConfig(pluginConfig); + readStrategy.setCatalogTable(userDefinedCatalogTable); rowType = readStrategy.getActualSeaTunnelRowTypeInfo(); break; case ORC: diff --git a/seatunnel-connectors-v2/connector-file/connector-file-s3/pom.xml b/seatunnel-connectors-v2/connector-file/connector-file-s3/pom.xml index fbf0016fced..82ddae46e6c 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-s3/pom.xml +++ b/seatunnel-connectors-v2/connector-file/connector-file-s3/pom.xml @@ -29,21 +29,6 @@ connector-file-s3 SeaTunnel : Connectors V2 : File : S3 - - 3.1.4 - 27.0-jre - - - - - - com.google.guava - guava - ${guava.version} - - - - @@ -67,14 +52,10 @@ - com.google.guava - guava - - - - org.apache.hadoop - hadoop-aws - ${hadoop-aws.version} + org.apache.seatunnel + seatunnel-hadoop-aws + ${project.version} + optional provided diff --git a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java index 997c42f9faf..6e91baf0013 100644 --- a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java +++ b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java @@ -240,7 +240,7 @@ private Table getTableInformation() { private WriteStrategy getWriteStrategy() { if (writeStrategy == null) { 
writeStrategy = WriteStrategyFactory.of(fileSinkConfig.getFileFormat(), fileSinkConfig); - writeStrategy.setSeaTunnelRowTypeInfo(catalogTable.getSeaTunnelRowType()); + writeStrategy.setCatalogTable(catalogTable); } return writeStrategy; } diff --git a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/source/config/HiveSourceConfig.java b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/source/config/HiveSourceConfig.java index e98143fcf0e..eba9b5a15be 100644 --- a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/source/config/HiveSourceConfig.java +++ b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/source/config/HiveSourceConfig.java @@ -279,7 +279,9 @@ private CatalogTable parseCatalogTableFromTable( } SeaTunnelRowType seaTunnelRowType = new SeaTunnelRowType(fieldNames, fieldTypes); - readStrategy.setSeaTunnelRowTypeInfo(seaTunnelRowType); + readStrategy.setCatalogTable( + CatalogTableUtil.getCatalogTable( + "hive", table.getDbName(), null, table.getTableName(), seaTunnelRowType)); final SeaTunnelRowType finalSeatunnelRowType = readStrategy.getActualSeaTunnelRowTypeInfo(); CatalogTable catalogTable = buildEmptyCatalogTable(readonlyConfig, table); diff --git a/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java b/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java index b666058ce13..cbea79a15a4 100644 --- a/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java +++ b/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java @@ -33,6 +33,7 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; +import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; @@ -263,6 +264,30 @@ public HttpResponse doPost(String url, Map headers, String body) return getResponse(httpPost); } + /** + * Send a post request with request headers and a binary request body + * + * @param url request address + * @param headers request header map + * @param byteArrayEntity request body content as a byte array entity (e.g. snappy-compressed) + * @return http response result + * @throws Exception information + */ + public HttpResponse doPost( + String url, Map<String, String> headers, ByteArrayEntity byteArrayEntity) + throws Exception { + // create a new http post + HttpPost httpPost = new HttpPost(url); + // set default request config + httpPost.setConfig(requestConfig); + // set request header + addHeaders(httpPost, headers); + // add body in request + httpPost.setEntity(byteArrayEntity); + // return http response + return getResponse(httpPost); + } + /** * Send a post request with request headers , request parameters and request body * diff --git a/seatunnel-connectors-v2/connector-hudi/pom.xml b/seatunnel-connectors-v2/connector-hudi/pom.xml index 35fc0b0459a..1a11d34f47e 100644 --- a/seatunnel-connectors-v2/connector-hudi/pom.xml +++
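The new overload lets callers POST pre-encoded binary bodies; the javadoc's mention of snappy points at producers such as Prometheus remote-write. A hypothetical caller, assuming org.xerial.snappy is on the classpath and httpClient is an HttpClientProvider instance:

    byte[] compressed = org.xerial.snappy.Snappy.compress(payloadBytes); // payloadBytes built by the caller
    Map<String, String> headers = new HashMap<>();
    headers.put("Content-Encoding", "snappy");
    headers.put("Content-Type", "application/x-protobuf");
    HttpResponse response = httpClient.doPost(url, headers, new ByteArrayEntity(compressed));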
b/seatunnel-connectors-v2/connector-hudi/pom.xml @@ -102,4 +102,27 @@ + <build> + <plugins> + <plugin> + <artifactId>maven-shade-plugin</artifactId> + <executions> + <execution> + <goals> + <goal>shade</goal> + </goals> + <phase>package</phase> + <configuration> + <relocations> + <relocation> + <pattern>org.apache.avro</pattern> + <shadedPattern>${seatunnel.shade.package}.${connector.name}.org.apache.avro</shadedPattern> + </relocation> + </relocations> + </configuration> + </execution> + </executions> + </plugin> + </plugins> + </build> diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalog.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalog.java index e0a25bfd85b..0d238c193d8 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalog.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalog.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hudi.avro.AvroSchemaUtils; import org.apache.hudi.common.model.HoodieAvroPayload; import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.table.HoodieTableConfig; @@ -53,6 +54,7 @@ import java.util.stream.Collectors; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.CDC_ENABLED; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.RECORD_KEY_FIELDS; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.TABLE_TYPE; import static org.apache.seatunnel.connectors.seatunnel.hudi.sink.convert.AvroSchemaConverter.convertToSchema; @@ -195,6 +197,7 @@ public CatalogTable getTable(TablePath tablePath) String.join(",", tableConfig.getRecordKeyFields().get())); } options.put(TABLE_TYPE.key(), tableType.name()); + options.put(CDC_ENABLED.key(), String.valueOf(tableConfig.isCDCEnabled())); return CatalogTable.of( TableIdentifier.of( catalogName, tablePath.getDatabaseName(), tablePath.getTableName()), @@ -218,10 +221,16 @@ public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreI .setTableType(table.getOptions().get(TABLE_TYPE.key())) .setRecordKeyFields(table.getOptions().get(RECORD_KEY_FIELDS.key())) .setTableCreateSchema( - convertToSchema(table.getSeaTunnelRowType()).toString()) + convertToSchema( + table.getSeaTunnelRowType(), + AvroSchemaUtils.getAvroRecordQualifiedName( + table.getTableId().getTableName())) + .toString()) .setTableName(tablePath.getTableName()) .setPartitionFields(String.join(",", table.getPartitionKeys())) .setPayloadClassName(HoodieAvroPayload.class.getName()) + .setCDCEnabled( + Boolean.parseBoolean(table.getOptions().get(CDC_ENABLED.key()))) .initTable(new HadoopStorageConfiguration(hadoopConf), tablePathStr); } } catch (IOException e) { diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiOptions.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiOptions.java index 38450e2dfdd..745e78eaf93 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiOptions.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiOptions.java @@ -44,12 +44,6 @@ public interface HudiOptions { .noDefaultValue() .withDescription("table_list"); - Option AUTO_COMMIT = -
Options.key("auto_commit") - .booleanType() - .defaultValue(true) - .withDescription("auto commit"); - Option SCHEMA_SAVE_MODE = Options.key("schema_save_mode") .enumType(SchemaSaveMode.class) diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiSinkConfig.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiSinkConfig.java index 06650e87c03..bcb4efe77b8 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiSinkConfig.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiSinkConfig.java @@ -40,15 +40,12 @@ public class HudiSinkConfig implements Serializable { private String confFilesPath; - private boolean autoCommit; - private SchemaSaveMode schemaSaveMode; private DataSaveMode dataSaveMode; public static HudiSinkConfig of(ReadonlyConfig config) { Builder builder = HudiSinkConfig.builder(); - Optional optionalAutoCommit = config.getOptional(HudiOptions.AUTO_COMMIT); Optional optionalSchemaSaveMode = config.getOptional(HudiOptions.SCHEMA_SAVE_MODE); Optional optionalDataSaveMode = @@ -58,7 +55,6 @@ public static HudiSinkConfig of(ReadonlyConfig config) { builder.confFilesPath(config.get(HudiOptions.CONF_FILES_PATH)); builder.tableList(HudiTableConfig.of(config)); - builder.autoCommit(optionalAutoCommit.orElseGet(HudiOptions.AUTO_COMMIT::defaultValue)); builder.schemaSaveMode( optionalSchemaSaveMode.orElseGet(HudiOptions.SCHEMA_SAVE_MODE::defaultValue)); builder.dataSaveMode( diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableConfig.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableConfig.java index ba0ae33efdb..1ae612c9cb5 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableConfig.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableConfig.java @@ -40,6 +40,7 @@ import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.BATCH_INTERVAL_MS; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.BATCH_SIZE; +import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.CDC_ENABLED; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.DATABASE; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.INDEX_CLASS_NAME; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.INDEX_TYPE; @@ -108,6 +109,9 @@ public HudiTableConfig() {} @JsonProperty("max_commits_to_keep") private int maxCommitsToKeep; + @JsonProperty("cdc_enabled") + private boolean cdcEnabled; + public static List of(ReadonlyConfig connectorConfig) { List tableList; if (connectorConfig.getOptional(HudiOptions.TABLE_LIST).isPresent()) { @@ -132,6 +136,7 @@ public static List of(ReadonlyConfig connectorConfig) { connectorConfig.get(UPSERT_SHUFFLE_PARALLELISM)) .minCommitsToKeep(connectorConfig.get(MIN_COMMITS_TO_KEEP)) .maxCommitsToKeep(connectorConfig.get(MAX_COMMITS_TO_KEEP)) + .cdcEnabled(connectorConfig.get(CDC_ENABLED)) .build(); tableList = Collections.singletonList(hudiTableConfig); } diff --git 
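A condensed view of how the new cdc_enabled flag travels from user config to the Hudi table, stitched together from the hunks in this patch (surrounding builder code elided):

    // 1. Sink option cdc_enabled (default false) is read into HudiTableConfig.cdcEnabled.
    // 2. HudiSinkFactory copies it onto the catalog table's options:
    catalogTable.getOptions().put(CDC_ENABLED.key(), String.valueOf(hudiTableConfig.isCdcEnabled()));
    // 3. HudiCatalog.createTable forwards it when initializing the Hoodie table:
    //        .setCDCEnabled(Boolean.parseBoolean(table.getOptions().get(CDC_ENABLED.key())))
    // 4. getTable round-trips it back out of HoodieTableConfig:
    options.put(CDC_ENABLED.key(), String.valueOf(tableConfig.isCDCEnabled()));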
a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableOptions.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableOptions.java index e48ef7be56e..2a2c7e01b35 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableOptions.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/config/HudiTableOptions.java @@ -46,6 +46,13 @@ public interface HudiTableOptions { .defaultValue(HoodieTableType.COPY_ON_WRITE) .withDescription("hudi table type"); + Option<Boolean> CDC_ENABLED = + Options.key("cdc_enabled") + .booleanType() + .defaultValue(false) + .withDescription( + "When enabled, persist the change data if necessary, and it can be queried in CDC query mode."); + Option RECORD_KEY_FIELDS = Options.key("record_key_fields") .stringType() @@ -76,7 +83,7 @@ public interface HudiTableOptions { Options.key("record_byte_size") .intType() .defaultValue(1024) - .withDescription("auto commit"); + .withDescription("The byte size of each record"); Option OP_TYPE = Options.key("op_type") diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSink.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSink.java index 13c245336aa..11a402ab101 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSink.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSink.java @@ -24,7 +24,6 @@ import org.apache.seatunnel.api.sink.DefaultSaveModeHandler; import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; -import org.apache.seatunnel.api.sink.SinkAggregatedCommitter; import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.api.sink.SupportMultiTableSink; import org.apache.seatunnel.api.sink.SupportSaveMode; @@ -38,14 +37,12 @@ import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiSinkConfig; import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableConfig; import org.apache.seatunnel.connectors.seatunnel.hudi.exception.HudiConnectorException; -import org.apache.seatunnel.connectors.seatunnel.hudi.sink.commiter.HudiSinkAggregatedCommitter; import org.apache.seatunnel.connectors.seatunnel.hudi.sink.state.HudiAggregatedCommitInfo; import org.apache.seatunnel.connectors.seatunnel.hudi.sink.state.HudiCommitInfo; import org.apache.seatunnel.connectors.seatunnel.hudi.sink.state.HudiSinkState; import org.apache.seatunnel.connectors.seatunnel.hudi.sink.writer.HudiSinkWriter; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -82,15 +79,13 @@ public String getPluginName() { @Override public HudiSinkWriter createWriter(SinkWriter.Context context) throws IOException { - return new HudiSinkWriter( - context, seaTunnelRowType, hudiSinkConfig, hudiTableConfig, new ArrayList<>()); + return new HudiSinkWriter(context, seaTunnelRowType, hudiSinkConfig, hudiTableConfig); } @Override public SinkWriter restoreWriter( SinkWriter.Context context, List states) throws IOException { - return new HudiSinkWriter( - context, seaTunnelRowType, hudiSinkConfig, hudiTableConfig, states); + return
SeaTunnelSink.super.restoreWriter(context, states); } @Override @@ -103,18 +98,6 @@ public Optional> getCommitInfoSerializer() { return Optional.of(new DefaultSerializer<>()); } - @Override - public Optional> - createAggregatedCommitter() throws IOException { - return Optional.of( - new HudiSinkAggregatedCommitter(hudiTableConfig, hudiSinkConfig, seaTunnelRowType)); - } - - @Override - public Optional> getAggregatedCommitInfoSerializer() { - return Optional.of(new DefaultSerializer<>()); - } - @Override public Optional getSaveModeHandler() { TablePath tablePath = diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSinkFactory.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSinkFactory.java index ed21b15166a..7e6d9826d95 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSinkFactory.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/HudiSinkFactory.java @@ -37,12 +37,12 @@ import java.util.List; import java.util.Optional; -import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiOptions.AUTO_COMMIT; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiOptions.CONF_FILES_PATH; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiOptions.TABLE_DFS_PATH; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiOptions.TABLE_LIST; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.BATCH_INTERVAL_MS; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.BATCH_SIZE; +import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.CDC_ENABLED; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.INDEX_CLASS_NAME; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.INDEX_TYPE; import static org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableOptions.INSERT_SHUFFLE_PARALLELISM; @@ -85,7 +85,7 @@ public OptionRule optionRule() { UPSERT_SHUFFLE_PARALLELISM, MIN_COMMITS_TO_KEEP, MAX_COMMITS_TO_KEEP, - AUTO_COMMIT, + CDC_ENABLED, SinkCommonOptions.MULTI_TABLE_SINK_REPLICA) .build(); } @@ -121,6 +121,10 @@ public TableSink createSink(TableSinkFactoryContext context) { } // table type catalogTable.getOptions().put(TABLE_TYPE.key(), hudiTableConfig.getTableType().name()); + // cdc enabled + catalogTable + .getOptions() + .put(CDC_ENABLED.key(), String.valueOf(hudiTableConfig.isCdcEnabled())); catalogTable = CatalogTable.of( newTableId, diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/commiter/HudiSinkAggregatedCommitter.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/commiter/HudiSinkAggregatedCommitter.java deleted file mode 100644 index beba719c76d..00000000000 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/commiter/HudiSinkAggregatedCommitter.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.seatunnel.connectors.seatunnel.hudi.sink.commiter; - -import org.apache.seatunnel.api.sink.SinkAggregatedCommitter; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiSinkConfig; -import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableConfig; -import org.apache.seatunnel.connectors.seatunnel.hudi.sink.client.HudiWriteClientProvider; -import org.apache.seatunnel.connectors.seatunnel.hudi.sink.state.HudiAggregatedCommitInfo; -import org.apache.seatunnel.connectors.seatunnel.hudi.sink.state.HudiCommitInfo; - -import lombok.extern.slf4j.Slf4j; - -import java.io.IOException; -import java.util.List; -import java.util.Stack; -import java.util.stream.Collectors; - -@Slf4j -public class HudiSinkAggregatedCommitter - implements SinkAggregatedCommitter { - - private final HudiTableConfig tableConfig; - - private final HudiWriteClientProvider writeClientProvider; - - public HudiSinkAggregatedCommitter( - HudiTableConfig tableConfig, - HudiSinkConfig sinkConfig, - SeaTunnelRowType seaTunnelRowType) { - this.tableConfig = tableConfig; - this.writeClientProvider = - new HudiWriteClientProvider( - sinkConfig, tableConfig.getTableName(), seaTunnelRowType); - } - - @Override - public List commit( - List aggregatedCommitInfo) throws IOException { - aggregatedCommitInfo = - aggregatedCommitInfo.stream() - .filter( - commit -> - commit.getHudiCommitInfoList().stream() - .anyMatch( - aggregateCommit -> - !aggregateCommit - .getWriteStatusList() - .isEmpty() - && !writeClientProvider - .getOrCreateClient() - .commit( - aggregateCommit - .getWriteInstantTime(), - aggregateCommit - .getWriteStatusList()))) - .collect(Collectors.toList()); - log.debug( - "hudi records have been committed, error commit info are {}", aggregatedCommitInfo); - return aggregatedCommitInfo; - } - - @Override - public HudiAggregatedCommitInfo combine(List commitInfos) { - return new HudiAggregatedCommitInfo(commitInfos); - } - - @Override - public void abort(List aggregatedCommitInfo) throws Exception { - writeClientProvider.getOrCreateClient().rollbackFailedWrites(); - // rollback force commit - for (HudiAggregatedCommitInfo hudiAggregatedCommitInfo : aggregatedCommitInfo) { - for (HudiCommitInfo commitInfo : hudiAggregatedCommitInfo.getHudiCommitInfoList()) { - Stack forceCommitTime = commitInfo.getForceCommitTime(); - while (!forceCommitTime.isEmpty()) { - writeClientProvider.getOrCreateClient().rollback(forceCommitTime.pop()); - } - } - } - } - - @Override - public void close() { - writeClientProvider.close(); - } -} diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/AvroSchemaConverter.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/AvroSchemaConverter.java index addbf8491f9..acb10212757 100644 
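The AvroSchemaUtils.getAvroRecordQualifiedName(tableName) call that now appears at every convertToSchema site gives each table its own Avro namespace instead of the shared org.apache.seatunnel.avro.generated.record name, so schemas for different tables no longer collide on record names. In the Hudi releases I am aware of, the result has the shape "hoodie.<table>.<table>_record"; treat the exact format as an assumption:

    String qualifiedName = AvroSchemaUtils.getAvroRecordQualifiedName("orders");
    // e.g. "hoodie.orders.orders_record" (assumed format)
    Schema schema = convertToSchema(rowType, qualifiedName);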
--- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/AvroSchemaConverter.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/AvroSchemaConverter.java @@ -50,13 +50,13 @@ private AvroSchemaConverter() { * @return Avro's {@link Schema} matching this logical type. */ public static Schema convertToSchema(SeaTunnelDataType schema) { - return convertToSchema(schema, "org.apache.seatunnel.avro.generated.record"); + return convertToSchema(schema, "record"); } /** * Converts Seatunnel {@link SeaTunnelDataType} (can be nested) into an Avro schema. * - *
<p>The "{rowName}_" is used as the nested row type name prefix in order to generate the right + * <p>
The "{rowName}." is used as the nested row type name prefix in order to generate the right * schema. Nested record type that only differs with type name is still compatible. * * @param dataType logical type @@ -105,10 +105,15 @@ public static Schema convertToSchema(SeaTunnelDataType dataType, String rowNa return nullableSchema(time); case DECIMAL: DecimalType decimalType = (DecimalType) dataType; - // store BigDecimal as byte[] + // store BigDecimal as Fixed + // for spark compatibility. Schema decimal = LogicalTypes.decimal(decimalType.getPrecision(), decimalType.getScale()) - .addToSchema(SchemaBuilder.builder().bytesType()); + .addToSchema( + SchemaBuilder.fixed(String.format("%s.fixed", rowName)) + .size( + computeMinBytesForDecimalPrecision( + decimalType.getPrecision()))); return nullableSchema(decimal); case ROW: SeaTunnelRowType rowType = (SeaTunnelRowType) dataType; @@ -121,7 +126,7 @@ public static Schema convertToSchema(SeaTunnelDataType dataType, String rowNa SeaTunnelDataType fieldType = rowType.getFieldType(i); SchemaBuilder.GenericDefault fieldBuilder = builder.name(fieldName) - .type(convertToSchema(fieldType, rowName + "_" + fieldName)); + .type(convertToSchema(fieldType, rowName + "." + fieldName)); builder = fieldBuilder.withDefault(null); } @@ -166,4 +171,12 @@ public static SeaTunnelDataType extractValueTypeToAvroMap(SeaTunnelDataType convertRow( seaTunnelRowType.getFieldNames()[i], createConverter(seaTunnelRowType.getFieldType(i)) .convert( - convertToSchema(seaTunnelRowType.getFieldType(i)), + convertToSchema( + seaTunnelRowType.getFieldType(i), + AvroSchemaUtils.getAvroRecordQualifiedName( + hudiTableConfig.getTableName()) + + "." + + seaTunnelRowType.getFieldNames()[i]), element.getField(i))); } return new HoodieAvroRecord<>( diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/RowDataToAvroConverters.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/RowDataToAvroConverters.java index a48179fdb7a..5c063626693 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/RowDataToAvroConverters.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/convert/RowDataToAvroConverters.java @@ -22,6 +22,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.avro.Conversions; import org.apache.avro.Schema; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericRecord; @@ -45,6 +46,8 @@ /** Tool class used to convert from {@link SeaTunnelRow} to Avro {@link GenericRecord}. 
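Storing DECIMAL as an Avro fixed rather than raw bytes is what Spark's Parquet reader expects, as the new comment says. A minimal sketch of the resulting schema and write path using Avro's stock conversion (the fixed name follows the "%s.fixed" pattern above, and 5 bytes is the minimal width computeMinBytesForDecimalPrecision would return for precision 10):

    Schema decimalSchema =
            LogicalTypes.decimal(10, 2)
                    .addToSchema(SchemaBuilder.fixed("hoodie.orders.orders_record.amount.fixed").size(5));
    Conversions.DecimalConversion conversion = new Conversions.DecimalConversion();
    GenericFixed fixed =
            conversion.toFixed(new BigDecimal("12345.67"), decimalSchema, decimalSchema.getLogicalType());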
*/ public class RowDataToAvroConverters implements Serializable { + private static final Conversions.DecimalConversion DECIMAL_CONVERSION = + new Conversions.DecimalConversion(); // -------------------------------------------------------------------------------- // Runtime Converters // -------------------------------------------------------------------------------- @@ -166,8 +169,9 @@ public Object convert(Schema schema, Object object) { @Override public Object convert(Schema schema, Object object) { - return ByteBuffer.wrap( - ((BigDecimal) object).unscaledValue().toByteArray()); + BigDecimal javaDecimal = (BigDecimal) object; + return DECIMAL_CONVERSION.toFixed( + javaDecimal, schema, schema.getLogicalType()); } }; break; diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiAggregatedCommitInfo.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiAggregatedCommitInfo.java index 348a040be65..065fed72ad7 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiAggregatedCommitInfo.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiAggregatedCommitInfo.java @@ -17,15 +17,6 @@ package org.apache.seatunnel.connectors.seatunnel.hudi.sink.state; -import lombok.AllArgsConstructor; -import lombok.Data; - import java.io.Serializable; -import java.util.List; - -@Data -@AllArgsConstructor -public class HudiAggregatedCommitInfo implements Serializable { - private final List hudiCommitInfoList; -} +public class HudiAggregatedCommitInfo implements Serializable {} diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiCommitInfo.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiCommitInfo.java index 0357931bb08..808cc4d942a 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiCommitInfo.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/state/HudiCommitInfo.java @@ -17,22 +17,6 @@ package org.apache.seatunnel.connectors.seatunnel.hudi.sink.state; -import org.apache.hudi.client.WriteStatus; - -import lombok.AllArgsConstructor; -import lombok.Data; - import java.io.Serializable; -import java.util.List; -import java.util.Stack; - -@Data -@AllArgsConstructor -public class HudiCommitInfo implements Serializable { - - private final String writeInstantTime; - - private final List writeStatusList; - private final Stack forceCommitTime; -} +public class HudiCommitInfo implements Serializable {} diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiRecordWriter.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiRecordWriter.java index 7eb3ab546b7..b98e2228707 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiRecordWriter.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiRecordWriter.java @@ -17,23 +17,23 @@ package 
org.apache.seatunnel.connectors.seatunnel.hudi.sink.writer; +import org.apache.seatunnel.api.table.type.RowKind; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; -import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiSinkConfig; import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiTableConfig; import org.apache.seatunnel.connectors.seatunnel.hudi.exception.HudiConnectorException; import org.apache.seatunnel.connectors.seatunnel.hudi.exception.HudiErrorCode; import org.apache.seatunnel.connectors.seatunnel.hudi.sink.client.WriteClientProvider; import org.apache.seatunnel.connectors.seatunnel.hudi.sink.convert.HudiRecordConverter; -import org.apache.seatunnel.connectors.seatunnel.hudi.sink.state.HudiCommitInfo; import org.apache.avro.Schema; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hudi.avro.AvroSchemaUtils; import org.apache.hudi.client.HoodieJavaWriteClient; -import org.apache.hudi.client.WriteStatus; import org.apache.hudi.common.model.HoodieAvroPayload; +import org.apache.hudi.common.model.HoodieKey; import org.apache.hudi.common.model.HoodieRecord; -import org.apache.hudi.common.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,10 +42,10 @@ import java.io.Serializable; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.Stack; +import java.util.Map; +import java.util.Set; import static org.apache.seatunnel.connectors.seatunnel.hudi.sink.convert.AvroSchemaConverter.convertToSchema; @@ -64,60 +64,44 @@ public class HudiRecordWriter implements Serializable { private final SeaTunnelRowType seaTunnelRowType; - private final boolean autoCommit; - private Schema schema; private transient int batchCount = 0; private final List> writeRecords; - private Stack forceCommitTime; - - private String writeInstantTime; + private final List deleteRecordKeys; - private List writeStatusList; + private final LinkedHashMap>> buffer = + new LinkedHashMap<>(); private transient volatile boolean closed = false; private transient volatile Exception flushException; public HudiRecordWriter( - HudiSinkConfig hudiSinkConfig, HudiTableConfig hudiTableConfig, WriteClientProvider clientProvider, SeaTunnelRowType seaTunnelRowType) { this.hudiTableConfig = hudiTableConfig; - this.autoCommit = hudiSinkConfig.isAutoCommit(); this.clientProvider = clientProvider; this.seaTunnelRowType = seaTunnelRowType; this.writeRecords = new ArrayList<>(); - this.writeStatusList = new ArrayList<>(); - this.forceCommitTime = new Stack<>(); + this.deleteRecordKeys = new ArrayList<>(); this.recordConverter = new HudiRecordConverter(); } - public HudiRecordWriter( - HudiSinkConfig sinkConfig, - HudiTableConfig tableConfig, - WriteClientProvider writeClientProvider, - SeaTunnelRowType seaTunnelRowType, - HudiCommitInfo hudiCommitInfo) { - this(sinkConfig, tableConfig, writeClientProvider, seaTunnelRowType); - this.writeInstantTime = hudiCommitInfo.getWriteInstantTime(); - this.writeStatusList = hudiCommitInfo.getWriteStatusList(); - } - public void open() { - this.schema = new Schema.Parser().parse(convertToSchema(seaTunnelRowType).toString()); + this.schema = + new Schema.Parser() + .parse( + convertToSchema( + seaTunnelRowType, + AvroSchemaUtils.getAvroRecordQualifiedName( + hudiTableConfig.getTableName())) + .toString()); 
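The LinkedHashMap buffer keyed by HoodieKey gives the writer two properties at once: flush order follows first arrival, and a later change to the same key replaces the earlier buffered one. Spelled out with the generic parameters the surrounding hunk implies:

    LinkedHashMap<HoodieKey, Pair<Boolean, HoodieRecord<HoodieAvroPayload>>> buffer =
            new LinkedHashMap<>();

    buffer.put(k1, Pair.of(true, insertRecord));  // +I buffered as a write
    buffer.put(k1, Pair.of(false, deleteRecord)); // -D for the same key replaces it
    // k1 keeps its original iteration position (LinkedHashMap semantics), so
    // flush() still visits changes in first-arrival order.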
try { - HoodieJavaWriteClient writeClient = - clientProvider.getOrCreateClient(); - if (StringUtils.nonEmpty(writeInstantTime) && Objects.nonNull(writeStatusList)) { - if (!writeClient.commit(writeInstantTime, writeStatusList)) { - LOG.warn("Failed to commit history data."); - } - } + clientProvider.getOrCreateClient(); } catch (Exception e) { throw new HudiConnectorException( CommonErrorCodeDeprecated.WRITER_OPERATION_FAILED, @@ -133,7 +117,7 @@ public void writeRecord(SeaTunnelRow record) { batchCount++; if (hudiTableConfig.getBatchSize() > 0 && batchCount >= hudiTableConfig.getBatchSize()) { - flush(true); + flush(); } } catch (Exception e) { throw new HudiConnectorException( @@ -143,92 +127,89 @@ public void writeRecord(SeaTunnelRow record) { } } - public synchronized void flush(boolean isNeedForceCommit) { + public synchronized void flush() { if (batchCount == 0) { log.debug("No data needs to be refreshed, waiting for incoming data."); return; } checkFlushException(); - HoodieJavaWriteClient writeClient = clientProvider.getOrCreateClient(); - if (autoCommit || writeInstantTime == null) { - writeInstantTime = writeClient.startCommit(); + Boolean preChangeFlag = null; + Set>>> entries = + buffer.entrySet(); + for (Map.Entry>> entry : entries) { + boolean currentChangeFlag = entry.getValue().getKey(); + if (currentChangeFlag) { + if (preChangeFlag != null && !preChangeFlag) { + executeDelete(); + } + writeRecords.add(entry.getValue().getValue()); + } else { + if (preChangeFlag != null && preChangeFlag) { + executeWrite(); + } + deleteRecordKeys.add(entry.getKey()); + } + preChangeFlag = currentChangeFlag; } - List currentWriteStatusList; + + if (preChangeFlag != null) { + if (preChangeFlag) { + executeWrite(); + } else { + executeDelete(); + } + } + batchCount = 0; + buffer.clear(); + } + + private void executeWrite() { + HoodieJavaWriteClient writeClient = clientProvider.getOrCreateClient(); + String writeInstantTime = writeClient.startCommit(); // write records switch (hudiTableConfig.getOpType()) { case INSERT: - currentWriteStatusList = writeClient.insert(writeRecords, writeInstantTime); + writeClient.insert(writeRecords, writeInstantTime); break; case UPSERT: - currentWriteStatusList = writeClient.upsert(writeRecords, writeInstantTime); + writeClient.upsert(writeRecords, writeInstantTime); break; case BULK_INSERT: - currentWriteStatusList = writeClient.bulkInsert(writeRecords, writeInstantTime); + writeClient.bulkInsert(writeRecords, writeInstantTime); break; default: throw new HudiConnectorException( HudiErrorCode.UNSUPPORTED_OPERATION, "Unsupported operation type: " + hudiTableConfig.getOpType()); } - if (!autoCommit) { - this.writeStatusList.addAll(currentWriteStatusList); - } - /** - * when the batch size of temporary records is reached, commit is forced here, even if - * configured not to be auto commit. because a timeline supports only one commit. 
- */ - forceCommit(isNeedForceCommit, autoCommit); writeRecords.clear(); - batchCount = 0; - } - - public Optional prepareCommit() { - flush(false); - if (!autoCommit) { - return Optional.of( - new HudiCommitInfo(writeInstantTime, writeStatusList, forceCommitTime)); - } - return Optional.empty(); - } - - private void commit() { - if (StringUtils.nonEmpty(writeInstantTime) && !writeStatusList.isEmpty()) { - log.debug( - "Commit hudi records, the instant time is {} and write status are {}", - writeInstantTime, - writeStatusList); - clientProvider.getOrCreateClient().commit(writeInstantTime, writeStatusList); - resetUpsertCommitInfo(); - } - } - - private void forceCommit(boolean isNeedForceCommit, boolean isAutoCommit) { - if (isNeedForceCommit && !isAutoCommit) { - clientProvider.getOrCreateClient().commit(writeInstantTime, writeStatusList); - forceCommitTime.add(writeInstantTime); - resetUpsertCommitInfo(); - } } - public HudiCommitInfo snapshotState() { - HudiCommitInfo hudiCommitInfo = - new HudiCommitInfo(writeInstantTime, writeStatusList, forceCommitTime); - // reset commit info in here, because the commit info will be committed in committer. - resetUpsertCommitInfo(); - // reset the force commit stack. - forceCommitTime = new Stack<>(); - return hudiCommitInfo; - } - - protected void resetUpsertCommitInfo() { - writeInstantTime = null; - writeStatusList = new ArrayList<>(); + private void executeDelete() { + HoodieJavaWriteClient writeClient = clientProvider.getOrCreateClient(); + writeClient.delete(deleteRecordKeys, writeClient.startCommit()); + deleteRecordKeys.clear(); } protected void prepareRecords(SeaTunnelRow element) { HoodieRecord hoodieAvroPayloadHoodieRecord = recordConverter.convertRow(schema, seaTunnelRowType, element, hudiTableConfig); - writeRecords.add(hoodieAvroPayloadHoodieRecord); + HoodieKey recordKey = hoodieAvroPayloadHoodieRecord.getKey(); + boolean changeFlag = changeFlag(element.getRowKind()); + buffer.put(recordKey, Pair.of(changeFlag, hoodieAvroPayloadHoodieRecord)); + } + + private boolean changeFlag(RowKind rowKind) { + switch (rowKind) { + case DELETE: + case UPDATE_BEFORE: + return false; + case INSERT: + case UPDATE_AFTER: + return true; + default: + throw new UnsupportedOperationException("Unknown row kind: " + rowKind); + } } protected void checkFlushException() { @@ -245,10 +226,7 @@ public synchronized void close() { if (!closed) { closed = true; try { - flush(false); - if (!autoCommit) { - commit(); - } + flush(); } catch (Exception e) { LOG.warn("Flush records to Hudi failed.", e); flushException = diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiSinkWriter.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiSinkWriter.java index 317215861a2..130a79adab5 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiSinkWriter.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/sink/writer/HudiSinkWriter.java @@ -35,8 +35,6 @@ import lombok.extern.slf4j.Slf4j; import java.io.IOException; -import java.util.Collections; -import java.util.List; import java.util.Optional; @Slf4j @@ -60,27 +58,15 @@ public HudiSinkWriter( Context context, SeaTunnelRowType seaTunnelRowType, HudiSinkConfig sinkConfig, - HudiTableConfig tableConfig, - List hudiSinkState) { + HudiTableConfig 
tableConfig) { this.sinkConfig = sinkConfig; this.tableConfig = tableConfig; this.seaTunnelRowType = seaTunnelRowType; this.writeClientProvider = new HudiWriteClientProvider( sinkConfig, tableConfig.getTableName(), seaTunnelRowType); - if (!hudiSinkState.isEmpty()) { - this.hudiRecordWriter = - new HudiRecordWriter( - sinkConfig, - tableConfig, - writeClientProvider, - seaTunnelRowType, - hudiSinkState.get(0).getHudiCommitInfo()); - } else { - this.hudiRecordWriter = - new HudiRecordWriter( - sinkConfig, tableConfig, writeClientProvider, seaTunnelRowType); - } + this.hudiRecordWriter = + new HudiRecordWriter(tableConfig, writeClientProvider, seaTunnelRowType); } @Override @@ -89,16 +75,11 @@ public void write(SeaTunnelRow element) throws IOException { hudiRecordWriter.writeRecord(element); } - @Override - public List snapshotState(long checkpointId) throws IOException { - return Collections.singletonList( - new HudiSinkState(checkpointId, hudiRecordWriter.snapshotState())); - } - @Override public Optional prepareCommit() throws IOException { tryOpen(); - return hudiRecordWriter.prepareCommit(); + hudiRecordWriter.flush(); + return Optional.empty(); } @Override @@ -128,8 +109,7 @@ public void setMultiTableResourceManager( queueIndex, tableConfig.getTableName()); this.hudiRecordWriter = - new HudiRecordWriter( - sinkConfig, tableConfig, writeClientProvider, seaTunnelRowType); + new HudiRecordWriter(tableConfig, writeClientProvider, seaTunnelRowType); } private void tryOpen() { diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/util/HudiUtil.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/util/HudiUtil.java index fe6cbe3e206..ef49c28a213 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/util/HudiUtil.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/util/HudiUtil.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hudi.avro.AvroSchemaUtils; import org.apache.hudi.client.HoodieJavaWriteClient; import org.apache.hudi.client.common.HoodieJavaEngineContext; import org.apache.hudi.common.config.HoodieStorageConfig; @@ -173,7 +174,12 @@ public static HoodieJavaWriteClient createHoodieJavaWriteClie hudiSinkConfig.getTableDfsPath(), hudiTable.getDatabase(), hudiTable.getTableName())) - .withSchema(convertToSchema(seaTunnelRowType).toString()) + .withSchema( + convertToSchema( + seaTunnelRowType, + AvroSchemaUtils.getAvroRecordQualifiedName( + tableName)) + .toString()) .withParallelism( hudiTable.getInsertShuffleParallelism(), hudiTable.getUpsertShuffleParallelism()) @@ -184,7 +190,6 @@ public static HoodieJavaWriteClient createHoodieJavaWriteClie hudiTable.getMinCommitsToKeep(), hudiTable.getMaxCommitsToKeep()) .build()) - .withAutoCommit(hudiSinkConfig.isAutoCommit()) .withCleanConfig( HoodieCleanConfig.newBuilder() .withAutoClean(true) diff --git a/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/HudiTest.java b/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/HudiTest.java index 82e85fcf4e2..7dbfc402b6f 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/HudiTest.java +++ 
b/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/HudiTest.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.connectors.seatunnel.hudi; +import org.apache.seatunnel.api.table.type.DecimalType; import org.apache.seatunnel.api.table.type.LocalTimeType; import org.apache.seatunnel.api.table.type.MapType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; @@ -27,6 +28,7 @@ import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericRecord; import org.apache.hadoop.conf.Configuration; +import org.apache.hudi.avro.AvroSchemaUtils; import org.apache.hudi.client.HoodieJavaWriteClient; import org.apache.hudi.client.WriteStatus; import org.apache.hudi.client.common.HoodieJavaEngineContext; @@ -52,6 +54,7 @@ import org.junit.jupiter.api.io.TempDir; import java.io.IOException; +import java.math.BigDecimal; import java.sql.Timestamp; import java.time.LocalDate; import java.time.LocalTime; @@ -95,7 +98,8 @@ public class HudiTest { "date", "time", "timestamp3", - "map" + "map", + "decimal" }, new SeaTunnelDataType[] { BOOLEAN_TYPE, @@ -107,16 +111,19 @@ public class HudiTest { LocalTimeType.LOCAL_TIME_TYPE, LocalTimeType.LOCAL_DATE_TIME_TYPE, new MapType(STRING_TYPE, LONG_TYPE), + new DecimalType(10, 5), }); private String getSchema() { - return convertToSchema(seaTunnelRowType).toString(); + return convertToSchema( + seaTunnelRowType, AvroSchemaUtils.getAvroRecordQualifiedName(tableName)) + .toString(); } @Test void testSchema() { Assertions.assertEquals( - "{\"type\":\"record\",\"name\":\"record\",\"namespace\":\"org.apache.seatunnel.avro.generated\",\"fields\":[{\"name\":\"bool\",\"type\":[\"null\",\"boolean\"],\"default\":null},{\"name\":\"int\",\"type\":[\"null\",\"int\"],\"default\":null},{\"name\":\"longValue\",\"type\":[\"null\",\"long\"],\"default\":null},{\"name\":\"float\",\"type\":[\"null\",\"float\"],\"default\":null},{\"name\":\"name\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"date\",\"type\":[\"null\",{\"type\":\"int\",\"logicalType\":\"date\"}],\"default\":null},{\"name\":\"time\",\"type\":[\"null\",{\"type\":\"int\",\"logicalType\":\"time-millis\"}],\"default\":null},{\"name\":\"timestamp3\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"map\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",\"long\"]}],\"default\":null}]}", + "{\"type\":\"record\",\"name\":\"hudi_record\",\"namespace\":\"hoodie.hudi\",\"fields\":[{\"name\":\"bool\",\"type\":[\"null\",\"boolean\"],\"default\":null},{\"name\":\"int\",\"type\":[\"null\",\"int\"],\"default\":null},{\"name\":\"longValue\",\"type\":[\"null\",\"long\"],\"default\":null},{\"name\":\"float\",\"type\":[\"null\",\"float\"],\"default\":null},{\"name\":\"name\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"date\",\"type\":[\"null\",{\"type\":\"int\",\"logicalType\":\"date\"}],\"default\":null},{\"name\":\"time\",\"type\":[\"null\",{\"type\":\"int\",\"logicalType\":\"time-millis\"}],\"default\":null},{\"name\":\"timestamp3\",\"type\":[\"null\",{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"map\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",\"long\"]}],\"default\":null},{\"name\":\"decimal\",\"type\":[\"null\",{\"type\":\"fixed\",\"name\":\"fixed\",\"namespace\":\"hoodie.hudi.hudi_record.decimal\",\"size\":5,\"logicalType\":\"decimal\",\"precision\":10,\"scale\":5}],\"default\":null}]}", getSchema()); } @@ 
-165,7 +172,8 @@ void testWriteData() throws IOException { expected.setField(7, timestamp3.toLocalDateTime()); Map<String, Long> map = new HashMap<>(); map.put("element", 123L); - expected.setField(9, map); + expected.setField(8, map); + expected.setField(9, BigDecimal.valueOf(10.121)); String instantTime = javaWriteClient.startCommit(); List<HoodieRecord<HoodieAvroPayload>> hoodieRecords = new ArrayList<>(); hoodieRecords.add(convertRow(expected)); @@ -178,13 +186,23 @@ private HoodieRecord<HoodieAvroPayload> convertRow(SeaTunnelRow element) { GenericRecord rec = new GenericData.Record( - new Schema.Parser().parse(convertToSchema(seaTunnelRowType).toString())); + new Schema.Parser() + .parse( + convertToSchema( + seaTunnelRowType, + AvroSchemaUtils.getAvroRecordQualifiedName( + tableName)) + .toString())); for (int i = 0; i < seaTunnelRowType.getTotalFields(); i++) { rec.put( seaTunnelRowType.getFieldNames()[i], createConverter(seaTunnelRowType.getFieldType(i)) .convert( - convertToSchema(seaTunnelRowType.getFieldType(i)), + convertToSchema( + seaTunnelRowType.getFieldType(i), + AvroSchemaUtils.getAvroRecordQualifiedName(tableName) + + "." + + seaTunnelRowType.getFieldNames()[i]), element.getField(i))); } diff --git a/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalogTest.java b/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalogTest.java index 7be81e89ba8..d3524c85c41 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalogTest.java +++ b/seatunnel-connectors-v2/connector-hudi/src/test/java/org/apache/seatunnel/connectors/seatunnel/hudi/catalog/HudiCatalogTest.java @@ -168,6 +168,7 @@ CatalogTable buildAllTypesTable(TableIdentifier tableIdentifier) { TableSchema schema = builder.build(); HashMap<String, String> options = new HashMap<>(); options.put("record_key_fields", "id,boolean_col"); + options.put("cdc_enabled", "false"); options.put("table_type", "MERGE_ON_READ"); return CatalogTable.of( tableIdentifier, schema, options, Collections.singletonList("dt_col"), "null"); diff --git a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java index 60591d9893c..216b08f9e2d 100644 --- a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java +++ b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java @@ -63,6 +63,8 @@ @Slf4j public class IcebergCatalog implements Catalog { + public static final String PROPS_TABLE_COMMENT = "comment"; + private final String catalogName; private final ReadonlyConfig readonlyConfig; private final IcebergCatalogLoader icebergCatalogLoader; @@ -257,14 +259,17 @@ public CatalogTable toCatalogTable(Table icebergTable, TablePath tablePath) { icebergTable.spec().fields().stream() .map(PartitionField::name) .collect(Collectors.toList()); - + String comment = + Optional.ofNullable(icebergTable.properties()) + .map(e -> e.get(PROPS_TABLE_COMMENT)) + .orElse(null); return CatalogTable.of( org.apache.seatunnel.api.table.catalog.TableIdentifier.of( catalogName, tablePath.getDatabaseName(), tablePath.getTableName()), builder.build(),
icebergTable.properties(), partitionKeys, - null, + comment, catalogName); } diff --git a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/utils/SchemaUtils.java b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/utils/SchemaUtils.java index 9aba4a777d8..780990572da 100644 --- a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/utils/SchemaUtils.java +++ b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/utils/SchemaUtils.java @@ -28,6 +28,7 @@ import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException; import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.connectors.seatunnel.iceberg.catalog.IcebergCatalog; import org.apache.seatunnel.connectors.seatunnel.iceberg.config.SinkConfig; import org.apache.seatunnel.connectors.seatunnel.iceberg.data.IcebergTypeMapper; import org.apache.seatunnel.connectors.seatunnel.iceberg.sink.schema.SchemaAddColumn; @@ -105,6 +106,8 @@ public static Table autoCreateTable( SinkConfig config = new SinkConfig(readonlyConfig); // build auto create table Map options = new HashMap<>(table.getOptions()); + Optional.ofNullable(table.getComment()) + .map(e -> options.put(IcebergCatalog.PROPS_TABLE_COMMENT, e)); // override options.putAll(config.getAutoCreateProps()); return createTable(catalog, toIcebergTableIdentifier(tablePath), config, schema, options); diff --git a/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalogTest.java b/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalogTest.java index 6ec5ae5783f..1eeeeebdf9e 100644 --- a/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalogTest.java +++ b/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalogTest.java @@ -194,7 +194,8 @@ CatalogTable buildAllTypesTable(TableIdentifier tableIdentifier) { TableSchema schema = builder.build(); HashMap options = new HashMap<>(); options.put("write.parquet.compression-codec", "zstd"); + options.put("comment", "test"); return CatalogTable.of( - tableIdentifier, schema, options, Collections.singletonList("dt_col"), "null"); + tableIdentifier, schema, options, Collections.singletonList("dt_col"), "test"); } } diff --git a/seatunnel-connectors-v2/connector-jdbc/pom.xml b/seatunnel-connectors-v2/connector-jdbc/pom.xml index 7b4199c462f..60e324be4c3 100644 --- a/seatunnel-connectors-v2/connector-jdbc/pom.xml +++ b/seatunnel-connectors-v2/connector-jdbc/pom.xml @@ -49,10 +49,11 @@ 2.5.1 8.6.0 3.1.3 - 2.4.11 + 2.4.12 12.2.0 3.0.0 3.2.0 + 5.1.0-og @@ -203,11 +204,15 @@ ${iris.jdbc.version} provided - org.tikv tikv-client-java ${tikv.version} + + + org.opengauss + opengauss-jdbc + ${opengauss.jdbc.version} provided @@ -316,11 +321,14 @@ com.intersystems intersystems-jdbc - org.tikv tikv-client-java + + org.opengauss + opengauss-jdbc + diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oceanbase/OceanBaseMySqlCatalog.java 
b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oceanbase/OceanBaseMySqlCatalog.java index 33aa2f8ccd4..046a16f01be 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oceanbase/OceanBaseMySqlCatalog.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oceanbase/OceanBaseMySqlCatalog.java @@ -20,26 +20,32 @@ import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.ConstraintKey; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.catalog.exception.CatalogException; import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.common.utils.JdbcUrlUtil; import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.AbstractJdbcCatalog; -import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.oceanbase.OceanBaseMySqlTypeConverter; -import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.oceanbase.OceanBaseMySqlTypeMapper; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.oceanbase.OceanBaseMysqlType; +import org.apache.commons.lang.StringUtils; + import com.google.common.base.Preconditions; import lombok.extern.slf4j.Slf4j; import java.sql.Connection; import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -197,12 +203,54 @@ protected String getDropDatabaseSql(String databaseName) { @Override public CatalogTable getTable(String sqlQuery) throws SQLException { - Connection defaultConnection = getConnection(defaultUrl); - try (Statement statement = defaultConnection.createStatement(); - ResultSet resultSet = statement.executeQuery(sqlQuery)) { - ResultSetMetaData metaData = resultSet.getMetaData(); - return CatalogUtils.getCatalogTable( - metaData, new OceanBaseMySqlTypeMapper(typeConverter), sqlQuery); + try (Connection connection = getConnection(defaultUrl)) { + String tableName = null; + String databaseName = null; + String schemaName = null; + String catalogName = "jdbc_catalog"; + TableSchema.Builder schemaBuilder = TableSchema.builder(); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(sqlQuery)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + tableName = metaData.getTableName(1); + databaseName = metaData.getCatalogName(1); + schemaName = metaData.getSchemaName(1); + catalogName = metaData.getCatalogName(1); + } + databaseName = StringUtils.defaultIfBlank(databaseName, null); + schemaName = StringUtils.defaultIfBlank(schemaName, null); + + TablePath tablePath = + StringUtils.isBlank(tableName) + ? 
TablePath.DEFAULT + : TablePath.of(databaseName, schemaName, tableName); + + try (PreparedStatement ps = + connection.prepareStatement(getSelectColumnsSql(tablePath)); + ResultSet columnResultSet = ps.executeQuery(); + ResultSet primaryKeys = + connection + .getMetaData() + .getPrimaryKeys(catalogName, schemaName, tableName)) { + while (primaryKeys.next()) { + String primaryKeyColumnName = primaryKeys.getString("COLUMN_NAME"); + schemaBuilder.primaryKey( + PrimaryKey.of( + primaryKeyColumnName, + Collections.singletonList(primaryKeyColumnName))); + } + while (columnResultSet.next()) { + schemaBuilder.column(buildColumn(columnResultSet)); + } + } + return CatalogTable.of( + TableIdentifier.of(catalogName, tablePath), + schemaBuilder.build(), + new HashMap<>(), + new ArrayList<>(), + "", + catalogName); } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/opengauss/OpenGaussCatalog.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/opengauss/OpenGaussCatalog.java new file mode 100644 index 00000000000..6a8eab50107 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/opengauss/OpenGaussCatalog.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.opengauss; + +import org.apache.seatunnel.common.utils.JdbcUrlUtil; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.psql.PostgresCatalog; + +import com.google.common.annotations.VisibleForTesting; +import lombok.extern.slf4j.Slf4j; + +import java.sql.Connection; + +@Slf4j +public class OpenGaussCatalog extends PostgresCatalog { + + public OpenGaussCatalog( + String catalogName, + String username, + String pwd, + JdbcUrlUtil.UrlInfo urlInfo, + String defaultSchema) { + super(catalogName, username, pwd, urlInfo, defaultSchema); + } + + @VisibleForTesting + public void setConnection(String url, Connection connection) { + this.connectionMap.put(url, connection); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/opengauss/OpenGaussCatalogFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/opengauss/OpenGaussCatalogFactory.java new file mode 100644 index 00000000000..bff96ff6d30 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/opengauss/OpenGaussCatalogFactory.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.opengauss; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.configuration.util.OptionValidationException; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.factory.CatalogFactory; +import org.apache.seatunnel.api.table.factory.Factory; +import org.apache.seatunnel.common.utils.JdbcUrlUtil; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.JdbcCatalogOptions; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; + +import com.google.auto.service.AutoService; + +import java.util.Optional; + +@AutoService(Factory.class) +public class OpenGaussCatalogFactory implements CatalogFactory { + + @Override + public String factoryIdentifier() { + return DatabaseIdentifier.OPENGAUSS; + } + + @Override + public Catalog createCatalog(String catalogName, ReadonlyConfig options) { + String urlWithDatabase = options.get(JdbcCatalogOptions.BASE_URL); + JdbcUrlUtil.UrlInfo urlInfo = JdbcUrlUtil.getUrlInfo(urlWithDatabase); + Optional defaultDatabase = urlInfo.getDefaultDatabase(); + if (!defaultDatabase.isPresent()) { + throw new OptionValidationException(JdbcCatalogOptions.BASE_URL); + } + return new OpenGaussCatalog( + catalogName, + options.get(JdbcCatalogOptions.USERNAME), + options.get(JdbcCatalogOptions.PASSWORD), + urlInfo, + options.get(JdbcCatalogOptions.SCHEMA)); + } + + @Override + public OptionRule optionRule() { + return JdbcCatalogOptions.BASE_RULE.build(); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java index f7b98c1bb17..1fbfd7c095e 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java @@ -30,11 +30,14 @@ import org.apache.commons.lang3.StringUtils; import lombok.Getter; +import lombok.extern.slf4j.Slf4j; import java.util.ArrayList; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; +@Slf4j public class PostgresCreateTableSqlBuilder { private List columns; private PrimaryKey primaryKey; @@ -161,10 +164,7 @@ private String buildColumnCommentSql(Column column, String tableName) { } private String buildUniqueKeySql(ConstraintKey constraintKey) { - String constraintName = constraintKey.getConstraintName(); - if (constraintName.length() > 25) { - constraintName = constraintName.substring(0, 25); - } + String constraintName = UUID.randomUUID().toString().replace("-", ""); String indexColumns = constraintKey.getColumnNames().stream() .map( @@ -175,16 +175,12 @@ private String buildUniqueKeySql(ConstraintKey constraintKey) { constraintKeyColumn.getColumnName(), fieldIde))) .collect(Collectors.joining(", ")); - return "CONSTRAINT " + constraintName + " UNIQUE (" + indexColumns + ")"; + return "CONSTRAINT \"" + constraintName + "\" UNIQUE (" + indexColumns + ")"; } private String buildIndexKeySql(TablePath tablePath, ConstraintKey constraintKey) { - // We add table name to 
index name to avoid name conflict in PG - // Since index name in PG should unique in the schema - String constraintName = tablePath.getTableName() + "_" + constraintKey.getConstraintName(); - if (constraintName.length() > 25) { - constraintName = constraintName.substring(0, 25); - } + // If the index name is omitted, PostgreSQL will choose an appropriate name based on table + // name and indexed columns. String indexColumns = constraintKey.getColumnNames().stream() .map( @@ -196,9 +192,7 @@ private String buildIndexKeySql(TablePath tablePath, ConstraintKey constraintKey fieldIde))) .collect(Collectors.joining(", ")); - return "CREATE INDEX " - + constraintName - + " ON " + return "CREATE INDEX ON " + tablePath.getSchemaAndTableName("\"") + "(" + indexColumns diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java index 45f849c28bd..e2a32b4f3f0 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java @@ -42,4 +42,5 @@ public class DatabaseIdentifier { public static final String XUGU = "XUGU"; public static final String IRIS = "IRIS"; public static final String INCEPTOR = "Inceptor"; + public static final String OPENGAUSS = "OpenGauss"; } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java index 2fc0fe8dca8..6b70713607d 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java @@ -553,7 +553,9 @@ default void applySchemaChange( buildAlterTableSql( event.getSourceDialectName(), changeColumn.getSourceType(), - AlterType.CHANGE.name(), + changeColumn.getDataType() == null + ? 
AlterType.RENAME.name() + : AlterType.CHANGE.name(), changeColumn, tableIdentifierWithQuoted, oldColumnName); @@ -625,6 +627,13 @@ default String buildAlterTableSql( return String.format( "ALTER TABLE %s drop column %s", tableName, quoteIdentifier(oldColumnName)); } + + if (alterOperation.equalsIgnoreCase(AlterType.RENAME.name())) { + return String.format( + "ALTER TABLE %s RENAME COLUMN %s TO %s", + tableName, oldColumnName, newColumn.getName()); + } + TypeConverter typeConverter = ConverterLoader.loadTypeConverter(dialectName()); BasicTypeDefine typeBasicTypeDefine = (BasicTypeDefine) typeConverter.reconvert(newColumn); @@ -638,10 +647,14 @@ default String buildAlterTableSql( newColumn, oldColumnName, typeBasicTypeDefine.getColumnType()); - basicSql = decorateWithNullable(basicSql, typeBasicTypeDefine); - basicSql = decorateWithDefaultValue(basicSql, typeBasicTypeDefine); - basicSql = decorateWithComment(basicSql, typeBasicTypeDefine); - return basicSql + ";"; + // Only decorate with the default value when the source dialect matches the sink dialect + // TODO: Support cross-database default values for DDL statements + if (sourceDialectName.equals(dialectName())) { + basicSql = decorateWithDefaultValue(basicSql, typeBasicTypeDefine); + } + basicSql = decorateWithNullable(basicSql, typeBasicTypeDefine, sourceDialectName); + basicSql = decorateWithComment(tableName, basicSql, typeBasicTypeDefine); + return dialectName().equals(DatabaseIdentifier.ORACLE) ? basicSql : basicSql + ";"; } /** @@ -707,14 +720,22 @@ default String decorateWithColumnNameAndType( * * @param basicSql alter table sql for sink table * @param typeBasicTypeDefine type basic type define of new column + * @param sourceDialectName source dialect name * @return alter table sql with nullable for sink table */ - default String decorateWithNullable(String basicSql, BasicTypeDefine typeBasicTypeDefine) { + default String decorateWithNullable( + String basicSql, BasicTypeDefine typeBasicTypeDefine, String sourceDialectName) { StringBuilder sql = new StringBuilder(basicSql); - if (typeBasicTypeDefine.isNullable()) { + if (typeBasicTypeDefine.isNullable() + && !dialectName().equalsIgnoreCase(DatabaseIdentifier.ORACLE)) { sql.append("NULL "); } else { - sql.append("NOT NULL "); + // TODO: Support cross-database default values for DDL statements, which would make this check unnecessary + if (!(!dialectName().equalsIgnoreCase(sourceDialectName) + && dialectName().equalsIgnoreCase(DatabaseIdentifier.MYSQL) + && typeBasicTypeDefine.getDataType().equalsIgnoreCase("datetime"))) { + sql.append("NOT NULL "); + } } return sql.toString(); } @@ -743,11 +764,13 @@ && needsQuotesWithDefaultValue(typeBasicTypeDefine.getColumnType()) /** * decorate with comment * + * @param tableName quoted table name * @param basicSql alter table sql for sink table * @param typeBasicTypeDefine type basic type define of new column * @return alter table sql with comment for sink table */ - default String decorateWithComment(String basicSql, BasicTypeDefine typeBasicTypeDefine) { + default String decorateWithComment( + String tableName, String basicSql, BasicTypeDefine typeBasicTypeDefine) { String comment = typeBasicTypeDefine.getComment(); StringBuilder sql = new StringBuilder(basicSql); if (StringUtils.isNotBlank(comment)) { @@ -790,6 +813,7 @@ enum AlterType { ADD, DROP, MODIFY, - CHANGE + CHANGE, + RENAME } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/db2/DB2Dialect.java
b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/db2/DB2Dialect.java index 6150dd4330d..5af57bf1045 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/db2/DB2Dialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/db2/DB2Dialect.java @@ -22,7 +22,9 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper; +import java.util.Arrays; import java.util.Optional; +import java.util.stream.Collectors; public class DB2Dialect implements JdbcDialect { @@ -44,6 +46,56 @@ public JdbcDialectTypeMapper getJdbcDialectTypeMapper() { @Override public Optional getUpsertStatement( String database, String tableName, String[] fieldNames, String[] uniqueKeyFields) { - return Optional.empty(); + // Generate field list for USING and INSERT clauses + String fieldList = String.join(", ", fieldNames); + + // Generate placeholder list for VALUES clause + String placeholderList = + Arrays.stream(fieldNames).map(field -> "?").collect(Collectors.joining(", ")); + + // Generate ON clause + String onClause = + Arrays.stream(uniqueKeyFields) + .map(field -> "target." + field + " = source." + field) + .collect(Collectors.joining(" AND ")); + + // Generate WHEN MATCHED clause + String whenMatchedClause = + Arrays.stream(fieldNames) + .map(field -> "target." + field + " <> source." + field) + .collect(Collectors.joining(" OR ")); + + // Generate UPDATE SET clause + String updateSetClause = + Arrays.stream(fieldNames) + .map(field -> "target." + field + " = source." + field) + .collect(Collectors.joining(", ")); + + // Generate WHEN NOT MATCHED clause + String insertClause = + "INSERT (" + + fieldList + + ") VALUES (" + + Arrays.stream(fieldNames) + .map(field -> "source." 
+ field) + .collect(Collectors.joining(", ")) + + ")"; + + // Combine all parts to form the final SQL statement + String mergeStatement = + String.format( + "MERGE INTO %s.%s AS target USING (VALUES (%s)) AS source (%s) ON %s " + + "WHEN MATCHED AND (%s) THEN UPDATE SET %s " + + "WHEN NOT MATCHED THEN %s;", + database, + tableName, + placeholderList, + fieldList, + onClause, + whenMatchedClause, + updateSetClause, + insertClause); + + return Optional.of(mergeStatement); } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java index 22431b0d96f..163c0304459 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java @@ -228,12 +228,13 @@ public Long approximateRowCntStatement(Connection connection, JdbcSourceTable ta } @Override - public String decorateWithComment(String basicSql, BasicTypeDefine typeBasicTypeDefine) { + public String decorateWithComment( + String tableName, String basicSql, BasicTypeDefine typeBasicTypeDefine) { MysqlType nativeType = (MysqlType) typeBasicTypeDefine.getNativeType(); if (NOT_SUPPORTED_DEFAULT_VALUES.contains(nativeType)) { return basicSql; } - return JdbcDialect.super.decorateWithComment(basicSql, typeBasicTypeDefine); + return JdbcDialect.super.decorateWithComment(tableName, basicSql, typeBasicTypeDefine); } @Override diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMySqlTypeConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMySqlTypeConverter.java index 4e9fa04d0d3..78c8415a886 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMySqlTypeConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMySqlTypeConverter.java @@ -25,6 +25,7 @@ import org.apache.seatunnel.api.table.type.DecimalType; import org.apache.seatunnel.api.table.type.LocalTimeType; import org.apache.seatunnel.api.table.type.PrimitiveByteArrayType; +import org.apache.seatunnel.api.table.type.VectorType; import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.connectors.seatunnel.common.source.TypeDefineUtils; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; @@ -100,6 +101,9 @@ public class OceanBaseMySqlTypeConverter public static final long POWER_2_32 = (long) Math.pow(2, 32); public static final long MAX_VARBINARY_LENGTH = POWER_2_16 - 4; + private static final String VECTOR_TYPE_NAME = ""; + private static final String VECTOR_NAME = "VECTOR"; + @Override public String identifier() { return DatabaseIdentifier.OCENABASE; @@ -289,6 +293,17 @@ public Column convert(BasicTypeDefine typeDefine) { builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); builder.scale(typeDefine.getScale()); break; + case VECTOR_TYPE_NAME: + String columnType = 
typeDefine.getColumnType(); + if (columnType.startsWith("vector(") && columnType.endsWith(")")) { + Integer number = + Integer.parseInt( + columnType.substring( + columnType.indexOf("(") + 1, columnType.indexOf(")"))); + builder.dataType(VectorType.VECTOR_FLOAT_TYPE); + builder.scale(number); + } + break; default: throw CommonError.convertToSeaTunnelTypeError( DatabaseIdentifier.OCENABASE, mysqlDataType, typeDefine.getName()); @@ -501,6 +516,11 @@ public BasicTypeDefine reconvert(Column column) { builder.columnType(MYSQL_DATETIME); } break; + case FLOAT_VECTOR: + builder.nativeType(VECTOR_NAME); + builder.columnType(String.format("%s(%s)", VECTOR_NAME, column.getScale())); + builder.dataType(VECTOR_NAME); + break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.OCENABASE, diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlDialect.java index 1c5d7734fb2..1824d6c76ab 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlDialect.java @@ -231,12 +231,13 @@ public Long approximateRowCntStatement(Connection connection, JdbcSourceTable ta } @Override - public String decorateWithComment(String basicSql, BasicTypeDefine typeBasicTypeDefine) { + public String decorateWithComment( + String tableName, String basicSql, BasicTypeDefine typeBasicTypeDefine) { OceanBaseMysqlType nativeType = (OceanBaseMysqlType) typeBasicTypeDefine.getNativeType(); if (NOT_SUPPORTED_DEFAULT_VALUES.contains(nativeType)) { return basicSql; } - return JdbcDialect.super.decorateWithComment(basicSql, typeBasicTypeDefine); + return JdbcDialect.super.decorateWithComment(tableName, basicSql, typeBasicTypeDefine); } @Override diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlJdbcRowConverter.java index a498879138d..0a52e6a90be 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlJdbcRowConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseMysqlJdbcRowConverter.java @@ -32,6 +32,8 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcFieldTypeUtils; +import org.apache.commons.lang3.StringUtils; + import java.math.BigDecimal; import java.nio.ByteBuffer; import java.sql.Date; @@ -89,12 +91,16 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL fields[fieldIndex] = JdbcFieldTypeUtils.getFloat(rs, resultSetIndex); break; case FLOAT_VECTOR: - Object[] objects = (Object[]) rs.getObject(fieldIndex); - Float[] arrays = new Float[objects.length]; - for (int i = 0; i < objects.length; i++) { - arrays[i] = 
Float.parseFloat(objects[i].toString()); + String result = JdbcFieldTypeUtils.getString(rs, resultSetIndex); + if (StringUtils.isNotBlank(result)) { + result = result.replace("[", "").replace("]", ""); + String[] stringArray = result.split(","); + Float[] arrays = new Float[stringArray.length]; + for (int i = 0; i < stringArray.length; i++) { + arrays[i] = Float.parseFloat(stringArray[i]); + } + fields[fieldIndex] = BufferUtils.toByteBuffer(arrays); } - fields[fieldIndex] = BufferUtils.toByteBuffer(arrays); break; case DOUBLE: fields[fieldIndex] = JdbcFieldTypeUtils.getDouble(rs, resultSetIndex); diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/opengauss/OpenGaussDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/opengauss/OpenGaussDialectFactory.java new file mode 100644 index 00000000000..b1ceed51e9b --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/opengauss/OpenGaussDialectFactory.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.opengauss; + +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.psql.PostgresDialectFactory; + +import com.google.auto.service.AutoService; + +@AutoService(JdbcDialectFactory.class) +public class OpenGaussDialectFactory extends PostgresDialectFactory { + + @Override + public boolean acceptsURL(String url) { + return url.startsWith("jdbc:opengauss:"); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java index b6a35dba0c1..b314302ba4c 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.oracle; import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; @@ -326,4 +327,19 @@ public Object[] sampleDataFromColumn( } } } + + @Override + public String decorateWithComment( + String tableName, String basicSql, BasicTypeDefine typeBasicTypeDefine) { + String comment = typeBasicTypeDefine.getComment(); + StringBuilder sql = new StringBuilder(basicSql); + if (StringUtils.isNotBlank(comment)) { + String commentSql = + String.format( + "COMMENT ON COLUMN %s.%s IS '%s'", + tableName, quoteIdentifier(typeBasicTypeDefine.getName()), comment); + sql.append(";\n").append(commentSql); + } + return sql.toString(); + } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java index f1cd4f8ec98..071e8ec6e1d 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java @@ -17,26 +17,38 @@ package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.psql; +import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.type.ArrayType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.api.table.type.SqlType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import 
org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.AbstractJdbcRowConverter; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcFieldTypeUtils; +import org.postgresql.util.PGobject; + +import java.math.BigDecimal; import java.sql.Array; import java.sql.Date; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Locale; import java.util.Optional; +import static org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.psql.PostgresTypeConverter.PG_INET; + public class PostgresJdbcRowConverter extends AbstractJdbcRowConverter { private static final String PG_GEOMETRY = "GEOMETRY"; @@ -143,4 +155,114 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL } return new SeaTunnelRow(fields); } + + @Override + public PreparedStatement toExternal( + TableSchema tableSchema, SeaTunnelRow row, PreparedStatement statement) + throws SQLException { + SeaTunnelRowType rowType = tableSchema.toPhysicalRowDataType(); + String[] sourceTypes = + tableSchema.getColumns().stream() + .filter(Column::isPhysical) + .map(Column::getSourceType) + .toArray(String[]::new); + for (int fieldIndex = 0; fieldIndex < rowType.getTotalFields(); fieldIndex++) { + try { + SeaTunnelDataType seaTunnelDataType = rowType.getFieldType(fieldIndex); + int statementIndex = fieldIndex + 1; + Object fieldValue = row.getField(fieldIndex); + if (fieldValue == null) { + statement.setObject(statementIndex, null); + continue; + } + + switch (seaTunnelDataType.getSqlType()) { + case STRING: + String sourceType = sourceTypes[fieldIndex]; + if (PG_INET.equalsIgnoreCase(sourceType)) { + PGobject inetObject = new PGobject(); + inetObject.setType(PG_INET); + inetObject.setValue(String.valueOf(row.getField(fieldIndex))); + statement.setObject(statementIndex, inetObject); + } else { + statement.setString(statementIndex, (String) row.getField(fieldIndex)); + } + break; + case BOOLEAN: + statement.setBoolean(statementIndex, (Boolean) row.getField(fieldIndex)); + break; + case TINYINT: + statement.setByte(statementIndex, (Byte) row.getField(fieldIndex)); + break; + case SMALLINT: + statement.setShort(statementIndex, (Short) row.getField(fieldIndex)); + break; + case INT: + statement.setInt(statementIndex, (Integer) row.getField(fieldIndex)); + break; + case BIGINT: + statement.setLong(statementIndex, (Long) row.getField(fieldIndex)); + break; + case FLOAT: + statement.setFloat(statementIndex, (Float) row.getField(fieldIndex)); + break; + case DOUBLE: + statement.setDouble(statementIndex, (Double) row.getField(fieldIndex)); + break; + case DECIMAL: + statement.setBigDecimal( + statementIndex, (BigDecimal) row.getField(fieldIndex)); + break; + case DATE: + LocalDate localDate = (LocalDate) row.getField(fieldIndex); + statement.setDate(statementIndex, java.sql.Date.valueOf(localDate)); + break; + case TIME: + writeTime(statement, statementIndex, (LocalTime) row.getField(fieldIndex)); + break; + case TIMESTAMP: + LocalDateTime localDateTime = (LocalDateTime) row.getField(fieldIndex); + statement.setTimestamp( + statementIndex, 
java.sql.Timestamp.valueOf(localDateTime)); + break; + case BYTES: + statement.setBytes(statementIndex, (byte[]) row.getField(fieldIndex)); + break; + case NULL: + statement.setNull(statementIndex, java.sql.Types.NULL); + break; + case ARRAY: + SeaTunnelDataType elementType = + ((ArrayType) seaTunnelDataType).getElementType(); + Object[] array = (Object[]) row.getField(fieldIndex); + if (array == null) { + statement.setNull(statementIndex, java.sql.Types.ARRAY); + break; + } + if (SqlType.TINYINT.equals(elementType.getSqlType())) { + Short[] shortArray = new Short[array.length]; + for (int i = 0; i < array.length; i++) { + shortArray[i] = Short.valueOf(array[i].toString()); + } + statement.setObject(statementIndex, shortArray); + } else { + statement.setObject(statementIndex, array); + } + break; + case MAP: + case ROW: + default: + throw new JdbcConnectorException( + CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE, + "Unexpected value: " + seaTunnelDataType); + } + } catch (Exception e) { + throw new JdbcConnectorException( + JdbcConnectorErrorCode.DATA_TYPE_CAST_FAILED, + "error field:" + rowType.getFieldNames()[fieldIndex], + e); + } + } + return statement; + } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresTypeConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresTypeConverter.java index 322bdc2a99e..980dd760e9f 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresTypeConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresTypeConverter.java @@ -81,6 +81,7 @@ public class PostgresTypeConverter implements TypeConverter { public static final String PG_CHAR_ARRAY = "_bpchar"; // character varying <=> varchar public static final String PG_VARCHAR = "varchar"; + public static final String PG_INET = "inet"; public static final String PG_CHARACTER_VARYING = "character varying"; // character varying[] <=> varchar[] <=> _varchar public static final String PG_VARCHAR_ARRAY = "_varchar"; @@ -221,7 +222,9 @@ public Column convert(BasicTypeDefine typeDefine) { case PG_XML: case PG_GEOMETRY: case PG_GEOGRAPHY: + case PG_INET: builder.dataType(BasicType.STRING_TYPE); + builder.sourceType(pgDataType); break; case PG_CHAR_ARRAY: case PG_VARCHAR_ARRAY: diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverter.java index 89344b43cad..a6f3791a694 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverter.java @@ -252,9 +252,9 @@ public Column convert(BasicTypeDefine typeDefine) { ? 
typeDefine.getLength().intValue() : MAX_PRECISION - 4; if (scale == null) { - builder.dataType(new DecimalType((int) precision, MAX_SCALE)); + builder.dataType(new DecimalType((int) precision, 0)); builder.columnLength(precision); - builder.scale(MAX_SCALE); + builder.scale(0); } else if (scale < 0) { int newPrecision = (int) (precision - scale); if (newPrecision == 1) { @@ -277,16 +277,17 @@ public Column convert(BasicTypeDefine typeDefine) { } break; case HANA_SMALLDECIMAL: + int smallDecimalScale = typeDefine.getScale() != null ? typeDefine.getScale() : 0; if (typeDefine.getPrecision() == null) { - builder.dataType(new DecimalType(DEFAULT_PRECISION, MAX_SMALL_DECIMAL_SCALE)); + builder.dataType(new DecimalType(DEFAULT_PRECISION, smallDecimalScale)); builder.columnLength((long) DEFAULT_PRECISION); - builder.scale(MAX_SMALL_DECIMAL_SCALE); + builder.scale(smallDecimalScale); } else { builder.dataType( new DecimalType( - typeDefine.getPrecision().intValue(), MAX_SMALL_DECIMAL_SCALE)); + typeDefine.getPrecision().intValue(), smallDecimalScale)); builder.columnLength(typeDefine.getPrecision()); - builder.scale(MAX_SMALL_DECIMAL_SCALE); + builder.scale(smallDecimalScale); } break; case HANA_REAL: @@ -297,6 +298,7 @@ public Column convert(BasicTypeDefine typeDefine) { break; case HANA_ST_POINT: case HANA_ST_GEOMETRY: + builder.columnLength(typeDefine.getLength()); builder.dataType(PrimitiveByteArrayType.INSTANCE); break; default: diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/AbstractJdbcSinkWriter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/AbstractJdbcSinkWriter.java index f894999a42f..876481e2d27 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/AbstractJdbcSinkWriter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/AbstractJdbcSinkWriter.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.connectors.seatunnel.jdbc.sink; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.api.sink.SupportMultiTableSinkWriter; import org.apache.seatunnel.api.table.catalog.Column; @@ -96,14 +97,15 @@ protected void processSchemaChangeEvent(AlterTableColumnEvent event) throws IOEx break; case SCHEMA_CHANGE_MODIFY_COLUMN: Column modifyColumn = ((AlterTableModifyColumnEvent) event).getColumn(); - replaceColumnByIndex(columns, modifyColumn.getName(), modifyColumn); + replaceColumnByIndex( + event.getEventType(), columns, modifyColumn.getName(), modifyColumn); break; case SCHEMA_CHANGE_CHANGE_COLUMN: AlterTableChangeColumnEvent alterTableChangeColumnEvent = (AlterTableChangeColumnEvent) event; Column changeColumn = alterTableChangeColumnEvent.getColumn(); String oldColumnName = alterTableChangeColumnEvent.getOldColumn(); - replaceColumnByIndex(columns, oldColumnName, changeColumn); + replaceColumnByIndex(event.getEventType(), columns, oldColumnName, changeColumn); break; default: throw new SeaTunnelException( @@ -132,10 +134,17 @@ protected void reOpenOutputFormat(AlterTableColumnEvent event) throws IOExceptio } protected void replaceColumnByIndex( - List columns, String oldColumnName, Column newColumn) { + EventType eventType, List columns, String oldColumnName, Column newColumn) { for (int i = 0; i < columns.size(); i++) { - if 
(columns.get(i).getName().equalsIgnoreCase(oldColumnName)) { - columns.set(i, newColumn); + Column column = columns.get(i); + if (column.getName().equalsIgnoreCase(oldColumnName)) { + // a RENAME ...... TO ...... event carries only the new column name, so keep + // the existing column definition and just rename it + if (eventType.equals(EventType.SCHEMA_CHANGE_CHANGE_COLUMN) + && newColumn.getDataType() == null) { + columns.set(i, column.rename(newColumn.getName())); + } else { + columns.set(i, newColumn); + } } } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkWriter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkWriter.java index 3f43b2088d0..41dd41ff9e9 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkWriter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkWriter.java @@ -17,7 +17,9 @@ package org.apache.seatunnel.connectors.seatunnel.jdbc.sink; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.sink.MultiTableResourceManager; +import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.type.SeaTunnelRow; @@ -28,6 +30,7 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.JdbcOutputFormatBuilder; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.connection.SimpleJdbcConnectionPoolProviderProxy; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.oracle.OracleDialect; import org.apache.seatunnel.connectors.seatunnel.jdbc.state.JdbcSinkState; import org.apache.seatunnel.connectors.seatunnel.jdbc.state.XidInfo; @@ -40,6 +43,8 @@ import java.util.List; import java.util.Optional; +import static org.apache.seatunnel.api.event.EventType.SCHEMA_CHANGE_CHANGE_COLUMN; @Slf4j public class JdbcSinkWriter extends AbstractJdbcSinkWriter { private final Integer primaryKeyIndex; @@ -162,4 +167,22 @@ public void close() throws IOException { outputFormat.close(); } } + + @Override + protected void replaceColumnByIndex( + EventType eventType, List columns, String oldColumnName, Column newColumn) { + // In Oracle, a rename-column operation can only change the column name, + // so we rename the matching column in place here.
+ if (eventType.equals(SCHEMA_CHANGE_CHANGE_COLUMN) && dialect instanceof OracleDialect) { + for (int i = 0; i < columns.size(); i++) { + Column column = columns.get(i); + if (column.getName().equalsIgnoreCase(oldColumnName)) { + column = column.rename(newColumn.getName()); + columns.set(i, column); + } + } + return; + } + super.replaceColumnByIndex(eventType, columns, oldColumnName, newColumn); + } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/source/FixedChunkSplitter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/source/FixedChunkSplitter.java index edeef96f0a2..72a4e061ac5 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/source/FixedChunkSplitter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/source/FixedChunkSplitter.java @@ -82,8 +82,8 @@ protected Collection createSplits( partitionEnd = range.getRight(); } if (partitionStart == null || partitionEnd == null) { - JdbcSourceSplit spilt = createSingleSplit(table); - return Collections.singletonList(spilt); + JdbcSourceSplit split = createSingleSplit(table); + return Collections.singletonList(split); } return createNumberColumnSplits( diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtils.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtils.java index 6eabba1edc1..0ab6b58e209 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtils.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtils.java @@ -232,10 +232,12 @@ static CatalogTable mergeCatalogTable(CatalogTable tableOfPath, CatalogTable tab && columnsOfPath .get(column.getName()) .getDataType() + .getSqlType() .equals( columnsOfQuery .get(column.getName()) - .getDataType())) + .getDataType() + .getSqlType())) .map(column -> columnsOfPath.get(column.getName())) .collect(Collectors.toList()); boolean schemaIncludeAllColumns = columnsOfMerge.size() == columnKeysOfQuery.size(); diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilderTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilderTest.java index 37049eced38..bc204a913a4 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilderTest.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilderTest.java @@ -35,6 +35,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.regex.Pattern; class PostgresCreateTableSqlBuilderTest { @@ -49,17 +50,18 @@ void build() { String createTableSql = postgresCreateTableSqlBuilder.build( catalogTable.getTableId().toTablePath()); - Assertions.assertEquals( - "CREATE TABLE \"test\" (\n" + String pattern = + "CREATE TABLE \"test\" \\(\n" + "\"id\" int4 NOT NULL PRIMARY KEY,\n" + "\"name\" text NOT NULL,\n" + "\"age\" 
int4 NOT NULL,\n" - + "\tCONSTRAINT unique_name UNIQUE (\"name\")\n" - + ");", - createTableSql); + + "\tCONSTRAINT \"([a-zA-Z0-9]+)\" UNIQUE \\(\"name\"\\)\n" + + "\\);"; + Assertions.assertTrue( + Pattern.compile(pattern).matcher(createTableSql).find()); + Assertions.assertEquals( - Lists.newArrayList( - "CREATE INDEX test_index_age ON \"test\"(\"age\");"), + Lists.newArrayList("CREATE INDEX ON \"test\"(\"age\");"), postgresCreateTableSqlBuilder.getCreateIndexSqls()); // skip index diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverterTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverterTest.java index 69d01d32b05..9f672bbeeeb 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverterTest.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/saphana/SapHanaTypeConverterTest.java @@ -126,7 +126,7 @@ public void testConvertSmallDecimal() { .build(); Column column = SapHanaTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); - Assertions.assertEquals(new DecimalType(38, 368), column.getDataType()); + Assertions.assertEquals(new DecimalType(38, 0), column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = @@ -139,7 +139,7 @@ public void testConvertSmallDecimal() { .build(); column = SapHanaTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); - Assertions.assertEquals(new DecimalType(10, 368), column.getDataType()); + Assertions.assertEquals(new DecimalType(10, 5), column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); } @@ -153,7 +153,7 @@ public void testConvertDecimal() { .build(); Column column = SapHanaTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); - Assertions.assertEquals(new DecimalType(34, 6176), column.getDataType()); + Assertions.assertEquals(new DecimalType(34, 0), column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); BasicTypeDefine typeDefine2 = @@ -382,6 +382,37 @@ public void testConvertDatetime() { Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); } + @Test + public void testConvertSpecialType() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("ST_POINT") + .length(8L) + .dataType("ST_POINT") + .build(); + Column column = SapHanaTypeConverter.INSTANCE.convert(typeDefine); + + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); + Assertions.assertEquals(8, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("ST_GEOMETRY") + .length(8L) + .dataType("ST_GEOMETRY") + .build(); + column = SapHanaTypeConverter.INSTANCE.convert(typeDefine); + + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); + 
Assertions.assertEquals(8, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + @Test public void testReconvertUnsupported() { Column column = diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtilsTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtilsTest.java index 4162bce30bb..872dc26f8f0 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtilsTest.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcCatalogUtilsTest.java @@ -25,6 +25,7 @@ import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.DecimalType; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -314,4 +315,59 @@ public void testColumnNotIncludeMerge() { tableOfQuery.getTableSchema().getColumns(), mergeTable.getTableSchema().getColumns()); } + + @Test + public void testDecimalColumnMerge() { + CatalogTable tableOfQuery = + CatalogTable.of( + TableIdentifier.of("default", null, null, "default"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "f1", + new DecimalType(10, 1), + null, + true, + null, + null, + null, + false, + false, + null, + null, + null)) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + null); + + CatalogTable tableOfPath = + CatalogTable.of( + TableIdentifier.of("default", null, null, "default"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "f1", + new DecimalType(10, 2), + null, + true, + null, + null, + null, + false, + false, + null, + null, + null)) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + null); + + CatalogTable mergeTable = JdbcCatalogUtils.mergeCatalogTable(tableOfPath, tableOfQuery); + // When column type is decimal, the precision and scale should not affect the merge result + Assertions.assertEquals( + tableOfPath.getTableSchema().getColumns().get(0), + mergeTable.getTableSchema().getColumns().get(0)); + } } diff --git a/seatunnel-connectors-v2/connector-kafka/src/main/java/org/apache/seatunnel/connectors/seatunnel/kafka/source/KafkaRecordEmitter.java b/seatunnel-connectors-v2/connector-kafka/src/main/java/org/apache/seatunnel/connectors/seatunnel/kafka/source/KafkaRecordEmitter.java index 6593137aff7..87d2b7b7c9f 100644 --- a/seatunnel-connectors-v2/connector-kafka/src/main/java/org/apache/seatunnel/connectors/seatunnel/kafka/source/KafkaRecordEmitter.java +++ b/seatunnel-connectors-v2/connector-kafka/src/main/java/org/apache/seatunnel/connectors/seatunnel/kafka/source/KafkaRecordEmitter.java @@ -31,7 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.util.Map; public class KafkaRecordEmitter @@ -71,13 +70,14 @@ public void emitRecord( // consumerRecord.offset + 1 is the offset commit to Kafka and also the start offset // for the next run splitState.setCurrentOffset(consumerRecord.offset() + 1); - } catch (IOException e) { + } catch (Exception e) { if (this.messageFormatErrorHandleWay == MessageFormatErrorHandleWay.SKIP) { logger.warn( "Deserialize message failed, skip this message, message: {}", new 
String(consumerRecord.value())); + } else { + throw e; } - throw e; } } diff --git a/seatunnel-connectors-v2/connector-milvus/pom.xml b/seatunnel-connectors-v2/connector-milvus/pom.xml index fc972ce1968..9a5fed37ab2 100644 --- a/seatunnel-connectors-v2/connector-milvus/pom.xml +++ b/seatunnel-connectors-v2/connector-milvus/pom.xml @@ -28,12 +28,20 @@ connector-milvus SeaTunnel : Connectors V2 : Milvus - + + + + com.google.code.gson + gson + 2.10.1 + + + io.milvus milvus-sdk-java - 2.4.3 + 2.4.5 org.slf4j @@ -42,19 +50,6 @@ - - org.mockito - mockito-core - 4.11.0 - test - - - org.mockito - mockito-inline - 4.11.0 - test - - diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusCatalog.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusCatalog.java index c1e1ac292da..3517597c5fb 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusCatalog.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusCatalog.java @@ -24,7 +24,6 @@ import org.apache.seatunnel.api.table.catalog.ConstraintKey; import org.apache.seatunnel.api.table.catalog.InfoPreviewResult; import org.apache.seatunnel.api.table.catalog.PreviewResult; -import org.apache.seatunnel.api.table.catalog.PrimaryKey; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.catalog.VectorIndex; @@ -33,20 +32,21 @@ import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException; import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException; import org.apache.seatunnel.api.table.catalog.exception.TableNotExistException; -import org.apache.seatunnel.api.table.type.ArrayType; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.CommonOptions; import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig; -import org.apache.seatunnel.connectors.seatunnel.milvus.convert.MilvusConvertUtils; import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; +import org.apache.seatunnel.connectors.seatunnel.milvus.utils.sink.MilvusSinkConverter; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import com.google.protobuf.ProtocolStringList; import io.milvus.client.MilvusServiceClient; import io.milvus.common.clientenum.ConsistencyLevelEnum; -import io.milvus.grpc.DataType; import io.milvus.grpc.ListDatabasesResponse; import io.milvus.grpc.ShowCollectionsResponse; +import io.milvus.grpc.ShowPartitionsResponse; import io.milvus.grpc.ShowType; import io.milvus.param.ConnectParam; import io.milvus.param.IndexType; @@ -61,6 +61,8 @@ import io.milvus.param.collection.HasCollectionParam; import io.milvus.param.collection.ShowCollectionsParam; import io.milvus.param.index.CreateIndexParam; +import io.milvus.param.partition.CreatePartitionParam; +import io.milvus.param.partition.ShowPartitionsParam; import lombok.extern.slf4j.Slf4j; import java.util.ArrayList; @@ -70,6 +72,7 @@ import java.util.Optional; import static com.google.common.base.Preconditions.checkNotNull; +import static 
org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.CREATE_INDEX; @Slf4j public class MilvusCatalog implements Catalog { @@ -196,7 +199,8 @@ public void createTable(TablePath tablePath, CatalogTable catalogTable, boolean checkNotNull(tableSchema, "tableSchema must not be null"); createTableInternal(tablePath, catalogTable); - if (CollectionUtils.isNotEmpty(tableSchema.getConstraintKeys())) { + if (CollectionUtils.isNotEmpty(tableSchema.getConstraintKeys()) + && config.get(CREATE_INDEX)) { for (ConstraintKey constraintKey : tableSchema.getConstraintKeys()) { if (constraintKey .getConstraintType() @@ -231,27 +235,61 @@ private void createIndexInternal( public void createTableInternal(TablePath tablePath, CatalogTable catalogTable) { try { + Map options = catalogTable.getOptions(); + + // partition key logic + boolean existPartitionKeyField = options.containsKey(MilvusOptions.PARTITION_KEY_FIELD); + String partitionKeyField = + existPartitionKeyField ? options.get(MilvusOptions.PARTITION_KEY_FIELD) : null; + // if the sink option is set, it overrides the value auto-read from the table options + if (StringUtils.isNotEmpty(config.get(MilvusSinkConfig.PARTITION_KEY))) { + existPartitionKeyField = true; + partitionKeyField = config.get(MilvusSinkConfig.PARTITION_KEY); + } + TableSchema tableSchema = catalogTable.getTableSchema(); List fieldTypes = new ArrayList<>(); for (Column column : tableSchema.getColumns()) { - fieldTypes.add(convertToFieldType(column, tableSchema.getPrimaryKey())); + if (column.getOptions() != null + && column.getOptions().containsKey(CommonOptions.METADATA.getName()) + && (Boolean) column.getOptions().get(CommonOptions.METADATA.getName())) { + // skip dynamic field + continue; + } + FieldType fieldType = + MilvusSinkConverter.convertToFieldType( + column, + tableSchema.getPrimaryKey(), + partitionKeyField, + config.get(MilvusSinkConfig.ENABLE_AUTO_ID)); + fieldTypes.add(fieldType); } - Map options = catalogTable.getOptions(); Boolean enableDynamicField = (options.containsKey(MilvusOptions.ENABLE_DYNAMIC_FIELD)) ?
Boolean.valueOf(options.get(MilvusOptions.ENABLE_DYNAMIC_FIELD)) : config.get(MilvusSinkConfig.ENABLE_DYNAMIC_FIELD); - + String collectionDescription = ""; + if (config.get(MilvusSinkConfig.COLLECTION_DESCRIPTION) != null + && config.get(MilvusSinkConfig.COLLECTION_DESCRIPTION) + .containsKey(tablePath.getTableName())) { + // use description from config first + collectionDescription = + config.get(MilvusSinkConfig.COLLECTION_DESCRIPTION) + .get(tablePath.getTableName()); + } else if (null != catalogTable.getComment()) { + collectionDescription = catalogTable.getComment(); + } CreateCollectionParam.Builder builder = CreateCollectionParam.newBuilder() .withDatabaseName(tablePath.getDatabaseName()) .withCollectionName(tablePath.getTableName()) + .withDescription(collectionDescription) .withFieldTypes(fieldTypes) .withEnableDynamicField(enableDynamicField) .withConsistencyLevel(ConsistencyLevelEnum.BOUNDED); - if (null != catalogTable.getComment()) { - builder.withDescription(catalogTable.getComment()); + if (StringUtils.isNotEmpty(options.get(MilvusOptions.SHARDS_NUM))) { + builder.withShardsNum(Integer.parseInt(options.get(MilvusOptions.SHARDS_NUM))); } CreateCollectionParam createCollectionParam = builder.build(); @@ -260,89 +298,51 @@ public void createTableInternal(TablePath tablePath, CatalogTable catalogTable) throw new MilvusConnectorException( MilvusConnectionErrorCode.CREATE_COLLECTION_ERROR, response.getMessage()); } + + // not exist partition key field, will read show partitions to create + if (!existPartitionKeyField && options.containsKey(MilvusOptions.PARTITION_KEY_FIELD)) { + createPartitionInternal(options.get(MilvusOptions.PARTITION_KEY_FIELD), tablePath); + } + } catch (Exception e) { throw new MilvusConnectorException( MilvusConnectionErrorCode.CREATE_COLLECTION_ERROR, e); } } - private FieldType convertToFieldType(Column column, PrimaryKey primaryKey) { - SeaTunnelDataType seaTunnelDataType = column.getDataType(); - FieldType.Builder build = - FieldType.newBuilder() - .withName(column.getName()) - .withDataType( - MilvusConvertUtils.convertSqlTypeToDataType( - seaTunnelDataType.getSqlType())); - switch (seaTunnelDataType.getSqlType()) { - case ROW: - build.withMaxLength(65535); - break; - case DATE: - build.withMaxLength(20); - break; - case INT: - build.withDataType(DataType.Int32); - break; - case SMALLINT: - build.withDataType(DataType.Int16); - break; - case TINYINT: - build.withDataType(DataType.Int8); - break; - case FLOAT: - build.withDataType(DataType.Float); - break; - case DOUBLE: - build.withDataType(DataType.Double); - break; - case MAP: - build.withDataType(DataType.JSON); - break; - case BOOLEAN: - build.withDataType(DataType.Bool); - break; - case STRING: - if (column.getColumnLength() == 0) { - build.withMaxLength(512); - } else { - build.withMaxLength((int) (column.getColumnLength() / 4)); - } - break; - case ARRAY: - ArrayType arrayType = (ArrayType) column.getDataType(); - SeaTunnelDataType elementType = arrayType.getElementType(); - build.withElementType( - MilvusConvertUtils.convertSqlTypeToDataType(elementType.getSqlType())); - build.withMaxCapacity(4095); - switch (elementType.getSqlType()) { - case STRING: - if (column.getColumnLength() == 0) { - build.withMaxLength(512); - } else { - build.withMaxLength((int) (column.getColumnLength() / 4)); - } - break; - } - break; - case BINARY_VECTOR: - case FLOAT_VECTOR: - case FLOAT16_VECTOR: - case BFLOAT16_VECTOR: - build.withDimension(column.getScale()); - break; + private void 
createPartitionInternal(String partitionNames, TablePath tablePath) { + R showPartitionsResponseR = + this.client.showPartitions( + ShowPartitionsParam.newBuilder() + .withDatabaseName(tablePath.getDatabaseName()) + .withCollectionName(tablePath.getTableName()) + .build()); + if (!Objects.equals(showPartitionsResponseR.getStatus(), R.success().getStatus())) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.SHOW_PARTITION_ERROR, + showPartitionsResponseR.getMessage()); } - - if (null != primaryKey && primaryKey.getColumnNames().contains(column.getName())) { - build.withPrimaryKey(true); - if (null != primaryKey.getEnableAutoId()) { - build.withAutoID(primaryKey.getEnableAutoId()); - } else { - build.withAutoID(config.get(MilvusSinkConfig.ENABLE_AUTO_ID)); + ProtocolStringList existPartitionNames = + showPartitionsResponseR.getData().getPartitionNamesList(); + + // start to loop create partition + String[] partitionNameArray = partitionNames.split(","); + for (String partitionName : partitionNameArray) { + if (existPartitionNames.contains(partitionName)) { + continue; + } + R response = + this.client.createPartition( + CreatePartitionParam.newBuilder() + .withDatabaseName(tablePath.getDatabaseName()) + .withCollectionName(tablePath.getTableName()) + .withPartitionName(partitionName) + .build()); + if (!R.success().getStatus().equals(response.getStatus())) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.CREATE_PARTITION_ERROR, response.getMessage()); } } - - return build.build(); } @Override diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusOptions.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusOptions.java index b589b21d3da..96241546f6c 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusOptions.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/catalog/MilvusOptions.java @@ -14,9 +14,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + package org.apache.seatunnel.connectors.seatunnel.milvus.catalog; public class MilvusOptions { public static final String ENABLE_DYNAMIC_FIELD = "enableDynamicField"; + public static final String SHARDS_NUM = "shardsNum"; + public static final String PARTITION_KEY_FIELD = "partitionKeyField"; + public static final String PARTITION_NAMES = "partitionNames"; } diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSinkConfig.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSinkConfig.java index cd286c987df..8d874fc0ae3 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSinkConfig.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSinkConfig.java @@ -23,6 +23,8 @@ import org.apache.seatunnel.api.sink.SchemaSaveMode; import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; import static org.apache.seatunnel.api.sink.DataSaveMode.APPEND_DATA; import static org.apache.seatunnel.api.sink.DataSaveMode.DROP_DATA; @@ -32,6 +34,16 @@ public class MilvusSinkConfig extends MilvusCommonConfig { public static final Option DATABASE = Options.key("database").stringType().noDefaultValue().withDescription("database"); + public static final Option> COLLECTION_DESCRIPTION = + Options.key("collection_description") + .mapType() + .defaultValue(new HashMap<>()) + .withDescription("collection description"); + public static final Option PARTITION_KEY = + Options.key("partition_key") + .stringType() + .noDefaultValue() + .withDescription("Milvus partition key field"); public static final Option SCHEMA_SAVE_MODE = Options.key("schema_save_mode") @@ -70,4 +82,19 @@ public class MilvusSinkConfig extends MilvusCommonConfig { .intType() .defaultValue(1000) .withDescription("writer batch size"); + public static final Option RATE_LIMIT = + Options.key("rate_limit") + .intType() + .defaultValue(100000) + .withDescription("writer rate limit"); + public static final Option LOAD_COLLECTION = + Options.key("load_collection") + .booleanType() + .defaultValue(false) + .withDescription("whether to load collection"); + public static final Option CREATE_INDEX = + Options.key("create_index") + .booleanType() + .defaultValue(false) + .withDescription("whether to create index"); } diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSourceConfig.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSourceConfig.java index b3efba279dc..94b98548386 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSourceConfig.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/config/MilvusSourceConfig.java @@ -33,4 +33,16 @@ public class MilvusSourceConfig extends MilvusCommonConfig { .stringType() .noDefaultValue() .withDescription("Milvus collection to read"); + + public static final Option BATCH_SIZE = + Options.key("batch_size") + .intType() + .defaultValue(1000) + .withDescription("read batch size"); + + public static final Option RATE_LIMIT = + Options.key("rate_limit") + .intType() + .defaultValue(1000000) + .withDescription("read rate limit"); }
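The precedence between these sink options and the table-level MilvusOptions entries is worth spelling out: in createTableInternal above, a partition_key set in the sink config overrides a partitionKeyField carried in the catalog table's options. A minimal, self-contained sketch of that rule, using plain-Java stand-ins rather than the SeaTunnel API:

```java
import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the precedence rule in MilvusCatalog.createTableInternal:
// a partition_key set in the sink config overrides the partitionKeyField
// carried in the table options. Plain-Java stand-ins, not SeaTunnel types.
public class PartitionKeyPrecedence {

    static String resolvePartitionKey(Map<String, String> tableOptions, String sinkPartitionKey) {
        if (sinkPartitionKey != null && !sinkPartitionKey.isEmpty()) {
            // the explicit sink option wins
            return sinkPartitionKey;
        }
        // fall back to the table-level option; may be null when unset
        return tableOptions.get("partitionKeyField");
    }

    public static void main(String[] args) {
        Map<String, String> tableOptions = new HashMap<>();
        tableOptions.put("partitionKeyField", "region");
        System.out.println(resolvePartitionKey(tableOptions, null));     // region
        System.out.println(resolvePartitionKey(tableOptions, "tenant")); // tenant
    }
}
```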
diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/convert/MilvusConvertUtils.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/convert/MilvusConvertUtils.java deleted file mode 100644 index 65027077957..00000000000 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/convert/MilvusConvertUtils.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.seatunnel.connectors.seatunnel.milvus.convert; - -import org.apache.seatunnel.api.configuration.ReadonlyConfig; -import org.apache.seatunnel.api.table.catalog.CatalogTable; -import org.apache.seatunnel.api.table.catalog.Column; -import org.apache.seatunnel.api.table.catalog.ConstraintKey; -import org.apache.seatunnel.api.table.catalog.PhysicalColumn; -import org.apache.seatunnel.api.table.catalog.PrimaryKey; -import org.apache.seatunnel.api.table.catalog.TableIdentifier; -import org.apache.seatunnel.api.table.catalog.TablePath; -import org.apache.seatunnel.api.table.catalog.TableSchema; -import org.apache.seatunnel.api.table.catalog.VectorIndex; -import org.apache.seatunnel.api.table.catalog.exception.CatalogException; -import org.apache.seatunnel.api.table.type.ArrayType; -import org.apache.seatunnel.api.table.type.BasicType; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; -import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SqlType; -import org.apache.seatunnel.api.table.type.VectorType; -import org.apache.seatunnel.common.utils.BufferUtils; -import org.apache.seatunnel.common.utils.JsonUtils; -import org.apache.seatunnel.connectors.seatunnel.milvus.catalog.MilvusOptions; -import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig; -import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; -import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; - -import org.apache.commons.collections4.CollectionUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.util.Lists; - -import com.google.gson.Gson; -import com.google.gson.JsonParser; -import com.google.protobuf.ProtocolStringList; -import io.milvus.client.MilvusServiceClient; -import io.milvus.common.utils.JacksonUtils; -import io.milvus.grpc.CollectionSchema; -import io.milvus.grpc.DataType; -import io.milvus.grpc.DescribeCollectionResponse; -import io.milvus.grpc.DescribeIndexResponse; -import io.milvus.grpc.FieldSchema; -import io.milvus.grpc.IndexDescription; -import io.milvus.grpc.KeyValuePair; -import 
io.milvus.grpc.ShowCollectionsResponse; -import io.milvus.grpc.ShowType; -import io.milvus.param.ConnectParam; -import io.milvus.param.R; -import io.milvus.param.collection.DescribeCollectionParam; -import io.milvus.param.collection.ShowCollectionsParam; -import io.milvus.param.index.DescribeIndexParam; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -public class MilvusConvertUtils { - - private static final String CATALOG_NAME = "Milvus"; - - private static final Gson gson = new Gson(); - - public static Map getSourceTables(ReadonlyConfig config) { - MilvusServiceClient client = null; - try { - client = - new MilvusServiceClient( - ConnectParam.newBuilder() - .withUri(config.get(MilvusSourceConfig.URL)) - .withToken(config.get(MilvusSourceConfig.TOKEN)) - .build()); - - String database = config.get(MilvusSourceConfig.DATABASE); - List collectionList = new ArrayList<>(); - if (StringUtils.isNotEmpty(config.get(MilvusSourceConfig.COLLECTION))) { - collectionList.add(config.get(MilvusSourceConfig.COLLECTION)); - } else { - R response = - client.showCollections( - ShowCollectionsParam.newBuilder() - .withDatabaseName(database) - .withShowType(ShowType.All) - .build()); - if (response.getStatus() != R.Status.Success.getCode()) { - throw new MilvusConnectorException( - MilvusConnectionErrorCode.SHOW_COLLECTIONS_ERROR); - } - - ProtocolStringList collections = response.getData().getCollectionNamesList(); - if (CollectionUtils.isEmpty(collections)) { - throw new MilvusConnectorException( - MilvusConnectionErrorCode.DATABASE_NO_COLLECTIONS, database); - } - collectionList.addAll(collections); - } - - Map map = new HashMap<>(); - for (String collection : collectionList) { - CatalogTable catalogTable = getCatalogTable(client, database, collection); - map.put(TablePath.of(database, collection), catalogTable); - } - return map; - } catch (Exception e) { - throw new CatalogException(e.getMessage(), e); - } finally { - if (client != null) { - client.close(); - } - } - } - - public static CatalogTable getCatalogTable( - MilvusServiceClient client, String database, String collection) { - R response = - client.describeCollection( - DescribeCollectionParam.newBuilder() - .withDatabaseName(database) - .withCollectionName(collection) - .build()); - - if (response.getStatus() != R.Status.Success.getCode()) { - throw new MilvusConnectorException(MilvusConnectionErrorCode.DESC_COLLECTION_ERROR); - } - - // collection column - DescribeCollectionResponse data = response.getData(); - CollectionSchema schema = data.getSchema(); - List columns = new ArrayList<>(); - for (FieldSchema fieldSchema : schema.getFieldsList()) { - columns.add(MilvusConvertUtils.convertColumn(fieldSchema)); - } - - // primary key - PrimaryKey primaryKey = buildPrimaryKey(schema.getFieldsList()); - - // index - R describeIndexResponseR = - client.describeIndex( - DescribeIndexParam.newBuilder() - .withDatabaseName(database) - .withCollectionName(collection) - .build()); - if (describeIndexResponseR.getStatus() != R.Status.Success.getCode()) { - throw new MilvusConnectorException(MilvusConnectionErrorCode.DESC_INDEX_ERROR); - } - DescribeIndexResponse indexResponse = describeIndexResponseR.getData(); - List vectorIndexes = buildVectorIndexes(indexResponse); - - // build tableSchema - TableSchema tableSchema = - TableSchema.builder() - .columns(columns) - .primaryKey(primaryKey) - 
.constraintKey( - ConstraintKey.of( - ConstraintKey.ConstraintType.VECTOR_INDEX_KEY, - "vector_index", - vectorIndexes)) - .build(); - - // build tableId - TableIdentifier tableId = TableIdentifier.of(CATALOG_NAME, database, collection); - - // build options info - Map options = new HashMap<>(); - options.put( - MilvusOptions.ENABLE_DYNAMIC_FIELD, String.valueOf(schema.getEnableDynamicField())); - - return CatalogTable.of( - tableId, tableSchema, options, new ArrayList<>(), schema.getDescription()); - } - - private static List buildVectorIndexes( - DescribeIndexResponse indexResponse) { - if (CollectionUtils.isEmpty(indexResponse.getIndexDescriptionsList())) { - return null; - } - - List list = new ArrayList<>(); - for (IndexDescription per : indexResponse.getIndexDescriptionsList()) { - Map paramsMap = - per.getParamsList().stream() - .collect( - Collectors.toMap(KeyValuePair::getKey, KeyValuePair::getValue)); - - VectorIndex index = - new VectorIndex( - per.getIndexName(), - per.getFieldName(), - paramsMap.get("index_type"), - paramsMap.get("metric_type")); - - list.add(index); - } - - return list; - } - - public static PrimaryKey buildPrimaryKey(List fields) { - for (FieldSchema field : fields) { - if (field.getIsPrimaryKey()) { - return PrimaryKey.of( - field.getName(), Lists.newArrayList(field.getName()), field.getAutoID()); - } - } - - return null; - } - - public static PhysicalColumn convertColumn(FieldSchema fieldSchema) { - DataType dataType = fieldSchema.getDataType(); - PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder(); - builder.name(fieldSchema.getName()); - builder.sourceType(dataType.name()); - builder.comment(fieldSchema.getDescription()); - - switch (dataType) { - case Bool: - builder.dataType(BasicType.BOOLEAN_TYPE); - break; - case Int8: - builder.dataType(BasicType.BYTE_TYPE); - break; - case Int16: - builder.dataType(BasicType.SHORT_TYPE); - break; - case Int32: - builder.dataType(BasicType.INT_TYPE); - break; - case Int64: - builder.dataType(BasicType.LONG_TYPE); - break; - case Float: - builder.dataType(BasicType.FLOAT_TYPE); - break; - case Double: - builder.dataType(BasicType.DOUBLE_TYPE); - break; - case VarChar: - builder.dataType(BasicType.STRING_TYPE); - for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { - if (keyValuePair.getKey().equals("max_length")) { - builder.columnLength(Long.parseLong(keyValuePair.getValue()) * 4); - break; - } - } - break; - case String: - case JSON: - builder.dataType(BasicType.STRING_TYPE); - break; - case Array: - builder.dataType(ArrayType.STRING_ARRAY_TYPE); - break; - case FloatVector: - builder.dataType(VectorType.VECTOR_FLOAT_TYPE); - for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { - if (keyValuePair.getKey().equals("dim")) { - builder.scale(Integer.valueOf(keyValuePair.getValue())); - break; - } - } - break; - case BinaryVector: - builder.dataType(VectorType.VECTOR_BINARY_TYPE); - for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { - if (keyValuePair.getKey().equals("dim")) { - builder.scale(Integer.valueOf(keyValuePair.getValue())); - break; - } - } - break; - case SparseFloatVector: - builder.dataType(VectorType.VECTOR_SPARSE_FLOAT_TYPE); - break; - case Float16Vector: - builder.dataType(VectorType.VECTOR_FLOAT16_TYPE); - for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { - if (keyValuePair.getKey().equals("dim")) { - builder.scale(Integer.valueOf(keyValuePair.getValue())); - break; - } - } - break; - case BFloat16Vector: - 
builder.dataType(VectorType.VECTOR_BFLOAT16_TYPE); - for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { - if (keyValuePair.getKey().equals("dim")) { - builder.scale(Integer.valueOf(keyValuePair.getValue())); - break; - } - } - break; - default: - throw new UnsupportedOperationException("Unsupported data type: " + dataType); - } - - return builder.build(); - } - - public static Object convertBySeaTunnelType(SeaTunnelDataType fieldType, Object value) { - SqlType sqlType = fieldType.getSqlType(); - switch (sqlType) { - case INT: - return Integer.parseInt(value.toString()); - case BIGINT: - return Long.parseLong(value.toString()); - case SMALLINT: - return Short.parseShort(value.toString()); - case STRING: - case DATE: - return value.toString(); - case FLOAT_VECTOR: - ByteBuffer floatVectorBuffer = (ByteBuffer) value; - Float[] floats = BufferUtils.toFloatArray(floatVectorBuffer); - return Arrays.stream(floats).collect(Collectors.toList()); - case BINARY_VECTOR: - case BFLOAT16_VECTOR: - case FLOAT16_VECTOR: - ByteBuffer vector = (ByteBuffer) value; - return gson.toJsonTree(vector.array()); - case SPARSE_FLOAT_VECTOR: - return JsonParser.parseString(JacksonUtils.toJsonString(value)).getAsJsonObject(); - case FLOAT: - return Float.parseFloat(value.toString()); - case BOOLEAN: - return Boolean.parseBoolean(value.toString()); - case DOUBLE: - return Double.parseDouble(value.toString()); - case ARRAY: - ArrayType arrayType = (ArrayType) fieldType; - switch (arrayType.getElementType().getSqlType()) { - case STRING: - String[] stringArray = (String[]) value; - return Arrays.asList(stringArray); - case INT: - Integer[] intArray = (Integer[]) value; - return Arrays.asList(intArray); - case BIGINT: - Long[] longArray = (Long[]) value; - return Arrays.asList(longArray); - case FLOAT: - Float[] floatArray = (Float[]) value; - return Arrays.asList(floatArray); - case DOUBLE: - Double[] doubleArray = (Double[]) value; - return Arrays.asList(doubleArray); - } - case ROW: - SeaTunnelRow row = (SeaTunnelRow) value; - return JsonUtils.toJsonString(row.getFields()); - case MAP: - return JacksonUtils.toJsonString(value); - default: - throw new MilvusConnectorException( - MilvusConnectionErrorCode.NOT_SUPPORT_TYPE, sqlType.name()); - } - } - - public static DataType convertSqlTypeToDataType(SqlType sqlType) { - switch (sqlType) { - case BOOLEAN: - return DataType.Bool; - case TINYINT: - return DataType.Int8; - case SMALLINT: - return DataType.Int16; - case INT: - return DataType.Int32; - case BIGINT: - return DataType.Int64; - case FLOAT: - return DataType.Float; - case DOUBLE: - return DataType.Double; - case STRING: - return DataType.VarChar; - case ARRAY: - return DataType.Array; - case FLOAT_VECTOR: - return DataType.FloatVector; - case BINARY_VECTOR: - return DataType.BinaryVector; - case FLOAT16_VECTOR: - return DataType.Float16Vector; - case BFLOAT16_VECTOR: - return DataType.BFloat16Vector; - case SPARSE_FLOAT_VECTOR: - return DataType.SparseFloatVector; - case DATE: - return DataType.VarChar; - case ROW: - return DataType.VarChar; - } - throw new CatalogException( - String.format("Not support convert to milvus type, sqlType is %s", sqlType)); - } -} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/exception/MilvusConnectionErrorCode.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/exception/MilvusConnectionErrorCode.java index 3acc3de804c..5aaee447ea6 100644 
--- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/exception/MilvusConnectionErrorCode.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/exception/MilvusConnectionErrorCode.java @@ -35,7 +35,12 @@ public enum MilvusConnectionErrorCode implements SeaTunnelErrorCode { CREATE_DATABASE_ERROR("MILVUS-13", "Create database error"), CREATE_COLLECTION_ERROR("MILVUS-14", "Create collection error"), CREATE_INDEX_ERROR("MILVUS-15", "Create index error"), - ; + INIT_CLIENT_ERROR("MILVUS-16", "Init milvus client error"), + WRITE_DATA_FAIL("MILVUS-17", "Write milvus data fail"), + READ_DATA_FAIL("MILVUS-18", "Read milvus data fail"), + LIST_PARTITIONS_FAILED("MILVUS-19", "Failed to list milvus partition"), + SHOW_PARTITION_ERROR("MILVUS-20", "Show partition error"), + CREATE_PARTITION_ERROR("MILVUS-21", "Create partition error"); private final String code; private final String description; diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusBufferBatchWriter.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusBufferBatchWriter.java new file mode 100644 index 00000000000..36949075f3e --- /dev/null +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusBufferBatchWriter.java @@ -0,0 +1,349 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.seatunnel.connectors.seatunnel.milvus.sink; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.type.CommonOptions; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.common.utils.SeaTunnelException; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; +import org.apache.seatunnel.connectors.seatunnel.milvus.utils.MilvusConnectorUtils; +import org.apache.seatunnel.connectors.seatunnel.milvus.utils.sink.MilvusSinkConverter; + +import org.apache.commons.lang3.StringUtils; + +import com.google.gson.JsonObject; +import io.milvus.v2.client.ConnectConfig; +import io.milvus.v2.client.MilvusClientV2; +import io.milvus.v2.common.IndexParam; +import io.milvus.v2.service.collection.request.AlterCollectionReq; +import io.milvus.v2.service.collection.request.DescribeCollectionReq; +import io.milvus.v2.service.collection.request.GetLoadStateReq; +import io.milvus.v2.service.collection.request.LoadCollectionReq; +import io.milvus.v2.service.collection.response.DescribeCollectionResp; +import io.milvus.v2.service.index.request.CreateIndexReq; +import io.milvus.v2.service.partition.request.CreatePartitionReq; +import io.milvus.v2.service.partition.request.HasPartitionReq; +import io.milvus.v2.service.vector.request.InsertReq; +import io.milvus.v2.service.vector.request.UpsertReq; +import lombok.extern.slf4j.Slf4j; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.BATCH_SIZE; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.CREATE_INDEX; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.ENABLE_AUTO_ID; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.ENABLE_UPSERT; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.LOAD_COLLECTION; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.RATE_LIMIT; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.TOKEN; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.URL; + +@Slf4j +public class MilvusBufferBatchWriter { + + private final CatalogTable catalogTable; + private final ReadonlyConfig config; + private final String collectionName; + private final Boolean autoId; + private final Boolean enableUpsert; + private Boolean hasPartitionKey; + + private MilvusClientV2 milvusClient; + private final MilvusSinkConverter milvusSinkConverter; + private int batchSize; + private volatile Map> milvusDataCache; + private final AtomicLong writeCache = new AtomicLong(); + private final AtomicLong writeCount = new AtomicLong(); + + private final List jsonFieldNames; + private final String dynamicFieldName; + + public MilvusBufferBatchWriter(CatalogTable catalogTable, ReadonlyConfig config) + throws SeaTunnelException { + this.catalogTable = catalogTable; + this.config = config; + this.autoId = + getAutoId( + 
catalogTable.getTableSchema().getPrimaryKey(), config.get(ENABLE_AUTO_ID)); + this.enableUpsert = config.get(ENABLE_UPSERT); + this.batchSize = config.get(BATCH_SIZE); + this.collectionName = catalogTable.getTablePath().getTableName(); + this.milvusDataCache = new HashMap<>(); + this.milvusSinkConverter = new MilvusSinkConverter(); + + this.dynamicFieldName = MilvusConnectorUtils.getDynamicField(catalogTable); + this.jsonFieldNames = MilvusConnectorUtils.getJsonField(catalogTable); + + initMilvusClient(config); + } + /* + * set up the Milvus client + */ + private void initMilvusClient(ReadonlyConfig config) throws SeaTunnelException { + try { + log.info("begin to init Milvus client"); + String dbName = catalogTable.getTablePath().getDatabaseName(); + String collectionName = catalogTable.getTablePath().getTableName(); + + ConnectConfig connectConfig = + ConnectConfig.builder().uri(config.get(URL)).token(config.get(TOKEN)).build(); + this.milvusClient = new MilvusClientV2(connectConfig); + if (StringUtils.isNotEmpty(dbName)) { + milvusClient.useDatabase(dbName); + } + this.hasPartitionKey = + MilvusConnectorUtils.hasPartitionKey(milvusClient, collectionName); + // set rate limit + if (config.get(RATE_LIMIT) > 0) { + log.info("set rate limit for collection: " + collectionName); + Map properties = new HashMap<>(); + properties.put("collection.insertRate.max.mb", config.get(RATE_LIMIT).toString()); + properties.put("collection.upsertRate.max.mb", config.get(RATE_LIMIT).toString()); + AlterCollectionReq alterCollectionReq = + AlterCollectionReq.builder() + .collectionName(collectionName) + .properties(properties) + .build(); + milvusClient.alterCollection(alterCollectionReq); + } + try { + if (config.get(CREATE_INDEX)) { + // create index + log.info("create index for collection: " + collectionName); + DescribeCollectionResp describeCollectionResp = + milvusClient.describeCollection( + DescribeCollectionReq.builder() + .collectionName(collectionName) + .build()); + List indexParams = new ArrayList<>(); + for (String fieldName : describeCollectionResp.getVectorFieldNames()) { + IndexParam indexParam = + IndexParam.builder() + .fieldName(fieldName) + .metricType(IndexParam.MetricType.COSINE) + .build(); + indexParams.add(indexParam); + } + CreateIndexReq createIndexReq = + CreateIndexReq.builder() + .collectionName(collectionName) + .indexParams(indexParams) + .build(); + milvusClient.createIndex(createIndexReq); + } + } catch (Exception e) { + log.warn("create index failed, maybe index already exists"); + } + if (config.get(LOAD_COLLECTION) + && !milvusClient.getLoadState( + GetLoadStateReq.builder().collectionName(collectionName).build())) { + log.info("load collection: " + collectionName); + milvusClient.loadCollection( + LoadCollectionReq.builder().collectionName(collectionName).build()); + } + log.info("init Milvus client success"); + } catch (Exception e) { + log.error("init Milvus client failed", e); + throw new MilvusConnectorException(MilvusConnectionErrorCode.INIT_CLIENT_ERROR, e); + } + } + + private Boolean getAutoId(PrimaryKey primaryKey, Boolean enableAutoId) { + if (null != primaryKey && null != primaryKey.getEnableAutoId()) { + return primaryKey.getEnableAutoId(); + } else { + return enableAutoId; + } + } + + public void addToBatch(SeaTunnelRow element) { + // put data to cache by partition + if (element.getOptions().containsKey(CommonOptions.PARTITION.getName())) { + String partitionName = + element.getOptions().get(CommonOptions.PARTITION.getName()).toString(); + if 
(!milvusDataCache.containsKey(partitionName)) { + Boolean hasPartition = + milvusClient.hasPartition( + HasPartitionReq.builder() + .collectionName(collectionName) + .partitionName(partitionName) + .build()); + if (!hasPartition) { + log.info("create partition: " + partitionName); + CreatePartitionReq createPartitionReq = + CreatePartitionReq.builder() + .collectionName(collectionName) + .partitionName(partitionName) + .build(); + milvusClient.createPartition(createPartitionReq); + log.info("create partition success"); + } + } + } + JsonObject data = + milvusSinkConverter.buildMilvusData( + catalogTable, config, jsonFieldNames, dynamicFieldName, element); + String partitionName = + element.getOptions() + .getOrDefault(CommonOptions.PARTITION.getName(), "_default") + .toString(); + this.milvusDataCache.computeIfAbsent(partitionName, k -> new ArrayList<>()); + milvusDataCache.get(partitionName).add(data); + writeCache.incrementAndGet(); + } + + public boolean needFlush() { + return this.writeCache.get() >= this.batchSize; + } + + public void flush() throws Exception { + log.info("Starting to put {} records to Milvus.", this.writeCache.get()); + // Flush the batch writer + // Get the number of records completed + if (this.milvusDataCache.isEmpty()) { + return; + } + writeData2Collection(); + log.info( + "Successfully put {} records to Milvus. Total records written: {}", + this.writeCache.get(), + this.writeCount.get()); + this.milvusDataCache = new HashMap<>(); + this.writeCache.set(0L); + } + + public void close() throws Exception { + String collectionName = catalogTable.getTablePath().getTableName(); + // set rate limit + Map properties = new HashMap<>(); + properties.put("collection.insertRate.max.mb", "-1"); + properties.put("collection.upsertRate.max.mb", "-1"); + AlterCollectionReq alterCollectionReq = + AlterCollectionReq.builder() + .collectionName(collectionName) + .properties(properties) + .build(); + milvusClient.alterCollection(alterCollectionReq); + this.milvusClient.close(10); + } + + private void writeData2Collection() throws Exception { + try { + for (String partitionName : milvusDataCache.keySet()) { + // default to use upsertReq, but upsert only works when autoID is disabled + List data = milvusDataCache.get(partitionName); + if (Objects.equals(partitionName, "_default") || hasPartitionKey) { + partitionName = null; + } + if (enableUpsert && !autoId) { + upsertWrite(partitionName, data); + } else { + insertWrite(partitionName, data); + } + } + } catch (Exception e) { + log.error("write data to Milvus failed", e); + log.error("error data: " + milvusDataCache); + throw new MilvusConnectorException(MilvusConnectionErrorCode.WRITE_DATA_FAIL); + } + writeCount.addAndGet(this.writeCache.get()); + } + + private void upsertWrite(String partitionName, List data) + throws InterruptedException { + UpsertReq upsertReq = + UpsertReq.builder().collectionName(this.collectionName).data(data).build(); + if (StringUtils.isNotEmpty(partitionName)) { + upsertReq.setPartitionName(partitionName); + } + try { + milvusClient.upsert(upsertReq); + } catch (Exception e) { + if (e.getMessage().contains("rate limit exceeded") + || e.getMessage().contains("received message larger than max")) { + if (data.size() > 10) { + log.warn("upsert data failed, retry in smaller chunks: {} ", data.size() / 2); + this.batchSize = this.batchSize / 2; + log.info("sleep 1 minute to avoid rate limit"); + // sleep 1 minute to avoid rate limit + Thread.sleep(60000); + log.info("sleep 1 minute success"); + // Split the data 
and retry in smaller chunks + List firstHalf = data.subList(0, data.size() / 2); + List secondHalf = data.subList(data.size() / 2, data.size()); + upsertWrite(partitionName, firstHalf); + upsertWrite(partitionName, secondHalf); + } else { + // If the data size is 10, throw the exception to avoid infinite recursion + throw new MilvusConnectorException( + MilvusConnectionErrorCode.WRITE_DATA_FAIL, + "upsert data failed," + " size down to 10, break", + e); + } + } else { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.WRITE_DATA_FAIL, + "upsert data failed with unknown exception", + e); + } + } + log.info("upsert data success"); + } + + private void insertWrite(String partitionName, List data) { + InsertReq insertReq = + InsertReq.builder().collectionName(this.collectionName).data(data).build(); + if (StringUtils.isNotEmpty(partitionName)) { + insertReq.setPartitionName(partitionName); + } + try { + milvusClient.insert(insertReq); + } catch (Exception e) { + if (e.getMessage().contains("rate limit exceeded") + || e.getMessage().contains("received message larger than max")) { + if (data.size() > 10) { + log.warn("insert data failed, retry in smaller chunks: {} ", data.size() / 2); + // Split the data and retry in smaller chunks + List firstHalf = data.subList(0, data.size() / 2); + List secondHalf = data.subList(data.size() / 2, data.size()); + this.batchSize = this.batchSize / 2; + insertWrite(partitionName, firstHalf); + insertWrite(partitionName, secondHalf); + } else { + // If the data size is 10, throw the exception to avoid infinite recursion + throw new MilvusConnectorException( + MilvusConnectionErrorCode.WRITE_DATA_FAIL, "insert data failed", e); + } + } else { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.WRITE_DATA_FAIL, + "insert data failed with unknown exception", + e); + } + } + } +} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSink.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSink.java index 10f4b6ca69d..9167d806df1 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSink.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSink.java @@ -38,10 +38,13 @@ import org.apache.seatunnel.connectors.seatunnel.milvus.state.MilvusCommitInfo; import org.apache.seatunnel.connectors.seatunnel.milvus.state.MilvusSinkState; +import lombok.extern.slf4j.Slf4j; + import java.util.Collections; import java.util.List; import java.util.Optional; +@Slf4j public class MilvusSink implements SeaTunnelSink< SeaTunnelRow, @@ -61,7 +64,6 @@ public MilvusSink(ReadonlyConfig config, CatalogTable catalogTable) { @Override public SinkWriter createWriter( SinkWriter.Context context) { - return new MilvusSinkWriter(context, catalogTable, config, Collections.emptyList()); } diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSinkWriter.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSinkWriter.java index 8fee6ebc68f..98b2b46c3b4 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSinkWriter.java +++ 
b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/MilvusSinkWriter.java @@ -21,74 +21,53 @@ import org.apache.seatunnel.api.sink.SinkCommitter; import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.api.table.catalog.CatalogTable; -import org.apache.seatunnel.api.table.catalog.PrimaryKey; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig; -import org.apache.seatunnel.connectors.seatunnel.milvus.sink.batch.MilvusBatchWriter; -import org.apache.seatunnel.connectors.seatunnel.milvus.sink.batch.MilvusBufferBatchWriter; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; import org.apache.seatunnel.connectors.seatunnel.milvus.state.MilvusCommitInfo; import org.apache.seatunnel.connectors.seatunnel.milvus.state.MilvusSinkState; -import io.milvus.v2.client.ConnectConfig; -import io.milvus.v2.client.MilvusClientV2; import lombok.extern.slf4j.Slf4j; import java.io.IOException; import java.util.List; import java.util.Optional; -import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.BATCH_SIZE; - -@Slf4j /** MilvusSinkWriter is a sink writer that will write {@link SeaTunnelRow} to Milvus. */ +@Slf4j public class MilvusSinkWriter implements SinkWriter { - private final Context context; - private final ReadonlyConfig config; - private MilvusBatchWriter batchWriter; + private final MilvusBufferBatchWriter batchWriter; + private ReadonlyConfig config; public MilvusSinkWriter( Context context, CatalogTable catalogTable, ReadonlyConfig config, List milvusSinkStates) { - this.context = context; + this.batchWriter = new MilvusBufferBatchWriter(catalogTable, config); this.config = config; - ConnectConfig connectConfig = - ConnectConfig.builder() - .uri(config.get(MilvusSinkConfig.URL)) - .token(config.get(MilvusSinkConfig.TOKEN)) - .dbName(config.get(MilvusSinkConfig.DATABASE)) - .build(); - this.batchWriter = - new MilvusBufferBatchWriter( - catalogTable, - config.get(BATCH_SIZE), - getAutoId(catalogTable.getTableSchema().getPrimaryKey()), - config.get(MilvusSinkConfig.ENABLE_UPSERT), - new MilvusClientV2(connectConfig)); + log.info("create Milvus sink writer success"); + log.info("MilvusSinkWriter config: " + config); } /** * write data to third party data receiver. * * @param element the data need be written. - * @throws IOException throw IOException when write data failed. 
*/ @Override public void write(SeaTunnelRow element) { batchWriter.addToBatch(element); if (batchWriter.needFlush()) { - batchWriter.flush(); - } - } - - private Boolean getAutoId(PrimaryKey primaryKey) { - if (null != primaryKey && null != primaryKey.getEnableAutoId()) { - return primaryKey.getEnableAutoId(); - } else { - return config.get(MilvusSinkConfig.ENABLE_AUTO_ID); + try { + // Flush the batch writer + batchWriter.flush(); + } catch (Exception e) { + log.error("flush Milvus sink writer failed", e); + throw new MilvusConnectorException(MilvusConnectionErrorCode.WRITE_DATA_FAIL, e); + } } } @@ -102,7 +81,6 @@ private Boolean getAutoId(PrimaryKey primaryKey) { */ @Override public Optional prepareCommit() throws IOException { - batchWriter.flush(); return Optional.empty(); } @@ -122,9 +100,14 @@ public void abortPrepare() {} */ @Override public void close() throws IOException { - if (batchWriter != null) { + try { + log.info("Stopping Milvus Client"); batchWriter.flush(); batchWriter.close(); + log.info("Stop Milvus Client success"); + } catch (Exception e) { + log.error("Stop Milvus Client failed", e); + throw new MilvusConnectorException(MilvusConnectionErrorCode.CLOSE_CLIENT_ERROR, e); } } } diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/batch/MilvusBufferBatchWriter.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/batch/MilvusBufferBatchWriter.java deleted file mode 100644 index 46f4e7ce7c7..00000000000 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/batch/MilvusBufferBatchWriter.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.seatunnel.connectors.seatunnel.milvus.sink.batch; - -import org.apache.seatunnel.api.table.catalog.CatalogTable; -import org.apache.seatunnel.api.table.catalog.PrimaryKey; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; -import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.common.utils.SeaTunnelException; -import org.apache.seatunnel.connectors.seatunnel.milvus.convert.MilvusConvertUtils; -import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; -import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; - -import org.apache.commons.collections4.CollectionUtils; - -import com.google.gson.Gson; -import com.google.gson.JsonObject; -import io.milvus.v2.client.MilvusClientV2; -import io.milvus.v2.service.vector.request.InsertReq; -import io.milvus.v2.service.vector.request.UpsertReq; - -import java.util.ArrayList; -import java.util.List; - -import static org.apache.seatunnel.api.table.catalog.PrimaryKey.isPrimaryKeyField; - -public class MilvusBufferBatchWriter implements MilvusBatchWriter { - - private final int batchSize; - private final CatalogTable catalogTable; - private final Boolean autoId; - private final Boolean enableUpsert; - private final String collectionName; - private MilvusClientV2 milvusClient; - - private volatile List milvusDataCache; - private volatile int writeCount = 0; - private static final Gson GSON = new Gson(); - - public MilvusBufferBatchWriter( - CatalogTable catalogTable, - Integer batchSize, - Boolean autoId, - Boolean enableUpsert, - MilvusClientV2 milvusClient) { - this.catalogTable = catalogTable; - this.autoId = autoId; - this.enableUpsert = enableUpsert; - this.milvusClient = milvusClient; - this.collectionName = catalogTable.getTablePath().getTableName(); - this.batchSize = batchSize; - this.milvusDataCache = new ArrayList<>(batchSize); - } - - @Override - public void addToBatch(SeaTunnelRow element) { - JsonObject data = buildMilvusData(element); - milvusDataCache.add(data); - writeCount++; - } - - @Override - public boolean needFlush() { - return this.writeCount >= this.batchSize; - } - - @Override - public synchronized boolean flush() { - if (CollectionUtils.isEmpty(this.milvusDataCache)) { - return true; - } - writeData2Collection(); - this.milvusDataCache = new ArrayList<>(this.batchSize); - this.writeCount = 0; - return true; - } - - @Override - public void close() { - try { - this.milvusClient.close(10); - } catch (InterruptedException e) { - throw new SeaTunnelException(e); - } - } - - private JsonObject buildMilvusData(SeaTunnelRow element) { - SeaTunnelRowType seaTunnelRowType = catalogTable.getSeaTunnelRowType(); - PrimaryKey primaryKey = catalogTable.getTableSchema().getPrimaryKey(); - - JsonObject data = new JsonObject(); - for (int i = 0; i < seaTunnelRowType.getFieldNames().length; i++) { - String fieldName = seaTunnelRowType.getFieldNames()[i]; - - if (autoId && isPrimaryKeyField(primaryKey, fieldName)) { - continue; // if create table open AutoId, then don't need insert data with - // primaryKey field. 
- } - - SeaTunnelDataType fieldType = seaTunnelRowType.getFieldType(i); - Object value = element.getField(i); - if (null == value) { - throw new MilvusConnectorException( - MilvusConnectionErrorCode.FIELD_IS_NULL, fieldName); - } - - data.add( - fieldName, - GSON.toJsonTree(MilvusConvertUtils.convertBySeaTunnelType(fieldType, value))); - } - return data; - } - - private void writeData2Collection() { - // default to use upsertReq, but upsert only works when autoID is disabled - if (enableUpsert && !autoId) { - UpsertReq upsertReq = - UpsertReq.builder() - .collectionName(this.collectionName) - .data(this.milvusDataCache) - .build(); - milvusClient.upsert(upsertReq); - } else { - InsertReq insertReq = - InsertReq.builder() - .collectionName(this.collectionName) - .data(this.milvusDataCache) - .build(); - milvusClient.insert(insertReq); - } - } -} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSource.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSource.java index 76ccfb743e5..abb7e9c898d 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSource.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSource.java @@ -28,7 +28,7 @@ import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig; -import org.apache.seatunnel.connectors.seatunnel.milvus.convert.MilvusConvertUtils; +import org.apache.seatunnel.connectors.seatunnel.milvus.utils.MilvusConvertUtils; import java.util.ArrayList; import java.util.List; @@ -42,9 +42,10 @@ public class MilvusSource private final ReadonlyConfig config; private final Map<TablePath, CatalogTable> sourceTables; - public MilvusSource(ReadonlyConfig sourceConfig) { - this.config = sourceConfig; - this.sourceTables = MilvusConvertUtils.getSourceTables(config); + public MilvusSource(ReadonlyConfig sourceConfig) { + this.config = sourceConfig; + MilvusConvertUtils milvusConvertUtils = new MilvusConvertUtils(sourceConfig); + this.sourceTables = milvusConvertUtils.getSourceTables(); } @Override diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceReader.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceReader.java index 7464c652b31..cd8b0261248 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceReader.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceReader.java @@ -24,44 +24,51 @@ import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; -import org.apache.seatunnel.api.table.type.RowKind; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.CommonOptions; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.common.exception.CommonErrorCode; -import
org.apache.seatunnel.common.utils.BufferUtils; import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig; import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; +import org.apache.seatunnel.connectors.seatunnel.milvus.utils.source.MilvusSourceConverter; import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.codehaus.plexus.util.StringUtils; + import io.milvus.client.MilvusServiceClient; import io.milvus.grpc.GetLoadStateResponse; import io.milvus.grpc.LoadState; +import io.milvus.grpc.QueryResults; import io.milvus.orm.iterator.QueryIterator; import io.milvus.param.ConnectParam; import io.milvus.param.R; +import io.milvus.param.RpcStatus; +import io.milvus.param.collection.AlterCollectionParam; import io.milvus.param.collection.GetLoadStateParam; import io.milvus.param.dml.QueryIteratorParam; +import io.milvus.param.dml.QueryParam; import io.milvus.response.QueryResultsWrapper; import lombok.extern.slf4j.Slf4j; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collections; import java.util.Deque; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentLinkedDeque; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig.BATCH_SIZE; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig.RATE_LIMIT; + @Slf4j public class MilvusSourceReader implements SourceReader { private final Deque pendingSplits = new ConcurrentLinkedDeque<>(); private final ReadonlyConfig config; private final Context context; - private Map sourceTables; + private final Map sourceTables; private MilvusServiceClient client; @@ -84,11 +91,36 @@ public void open() throws Exception { .withUri(config.get(MilvusSourceConfig.URL)) .withToken(config.get(MilvusSourceConfig.TOKEN)) .build()); + setRateLimit(config.get(RATE_LIMIT).toString()); + } + + private void setRateLimit(String rateLimit) { + log.info("Set rate limit: " + rateLimit); + for (Map.Entry entry : sourceTables.entrySet()) { + TablePath tablePath = entry.getKey(); + String collectionName = tablePath.getTableName(); + + AlterCollectionParam alterCollectionParam = + AlterCollectionParam.newBuilder() + .withDatabaseName(tablePath.getDatabaseName()) + .withCollectionName(collectionName) + .withProperty("collection.queryRate.max.qps", rateLimit) + .build(); + R response = client.alterCollection(alterCollectionParam); + if (response.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.SERVER_RESPONSE_FAILED, response.getException()); + } + } + log.info("Set rate limit success"); } @Override public void close() throws IOException { + log.info("Close milvus source reader"); + setRateLimit("-1"); client.close(); + log.info("Close milvus source reader success"); } @Override @@ -96,7 +128,13 @@ public void pollNext(Collector output) throws Exception { synchronized (output.getCheckpointLock()) { MilvusSourceSplit split = pendingSplits.poll(); if (null != split) { - handleEveryRowInternal(split, output); + try { + log.info("Begin to read data from split: " + split); + pollNextData(split, output); + } catch (Exception e) { + log.error("Read data from split: " + split + " failed", e); + throw new MilvusConnectorException(MilvusConnectionErrorCode.READ_DATA_FAIL, e); + } } else { 
if (!noMoreSplit) { log.info("Milvus source wait split!"); @@ -113,9 +151,12 @@ public void pollNext(Collector output) throws Exception { Thread.sleep(1000L); } - private void handleEveryRowInternal(MilvusSourceSplit split, Collector output) { + private void pollNextData(MilvusSourceSplit split, Collector output) + throws InterruptedException { TablePath tablePath = split.getTablePath(); + String partitionName = split.getPartitionName(); TableSchema tableSchema = sourceTables.get(tablePath).getTableSchema(); + log.info("begin to read data from milvus, table schema: " + tableSchema); if (null == tableSchema) { throw new MilvusConnectorException( MilvusConnectionErrorCode.SOURCE_TABLE_SCHEMA_IS_NULL); @@ -136,129 +177,117 @@ private void handleEveryRowInternal(MilvusSourceSplit split, Collector response = client.queryIterator(param); - if (response.getStatus() != R.Status.Success.getCode()) { + R queryResultsR = client.query(queryParam.build()); + + if (queryResultsR.getStatus() != R.Status.Success.getCode()) { throw new MilvusConnectorException( MilvusConnectionErrorCode.SERVER_RESPONSE_FAILED, loadStateResponse.getException()); } + QueryResultsWrapper wrapper = new QueryResultsWrapper(queryResultsR.getData()); + List records = wrapper.getRowRecords(); + log.info("Total records num: " + records.get(0).getFieldValues().get("count(*)")); - QueryIterator iterator = response.getData(); - while (true) { - List next = iterator.next(); - if (next == null || next.isEmpty()) { - break; - } else { - for (QueryResultsWrapper.RowRecord record : next) { - SeaTunnelRow seaTunnelRow = - convertToSeaTunnelRow(record, tableSchema, tablePath); - output.collect(seaTunnelRow); - } - } - } + long batchSize = (long) config.get(BATCH_SIZE); + queryIteratorData(tablePath, partitionName, tableSchema, output, batchSize); } - public SeaTunnelRow convertToSeaTunnelRow( - QueryResultsWrapper.RowRecord record, TableSchema tableSchema, TablePath tablePath) { - SeaTunnelRowType typeInfo = tableSchema.toPhysicalRowDataType(); - Object[] fields = new Object[record.getFieldValues().size()]; - Map fieldValuesMap = record.getFieldValues(); - String[] fieldNames = typeInfo.getFieldNames(); - for (int fieldIndex = 0; fieldIndex < typeInfo.getTotalFields(); fieldIndex++) { - SeaTunnelDataType seaTunnelDataType = typeInfo.getFieldType(fieldIndex); - Object filedValues = fieldValuesMap.get(fieldNames[fieldIndex]); - switch (seaTunnelDataType.getSqlType()) { - case STRING: - fields[fieldIndex] = filedValues.toString(); - break; - case BOOLEAN: - if (filedValues instanceof Boolean) { - fields[fieldIndex] = filedValues; - } else { - fields[fieldIndex] = Boolean.valueOf(filedValues.toString()); - } - break; - case INT: - if (filedValues instanceof Integer) { - fields[fieldIndex] = filedValues; - } else { - fields[fieldIndex] = Integer.valueOf(filedValues.toString()); - } - break; - case BIGINT: - if (filedValues instanceof Long) { - fields[fieldIndex] = filedValues; - } else { - fields[fieldIndex] = Long.parseLong(filedValues.toString()); - } - break; - case FLOAT: - if (filedValues instanceof Float) { - fields[fieldIndex] = filedValues; - } else { - fields[fieldIndex] = Float.parseFloat(filedValues.toString()); - } - break; - case DOUBLE: - if (filedValues instanceof Double) { - fields[fieldIndex] = filedValues; - } else { - fields[fieldIndex] = Double.parseDouble(filedValues.toString()); - } - break; - case FLOAT_VECTOR: - if (filedValues instanceof List) { - List list = (List) filedValues; - Float[] arrays = new Float[list.size()]; 
- for (int i = 0; i < list.size(); i++) { - arrays[i] = Float.parseFloat(list.get(i).toString()); - } - fields[fieldIndex] = BufferUtils.toByteBuffer(arrays); - break; - } else { - throw new MilvusConnectorException( - CommonErrorCode.UNSUPPORTED_DATA_TYPE, - "Unexpected vector value: " + filedValues); - } - case BINARY_VECTOR: - case FLOAT16_VECTOR: - case BFLOAT16_VECTOR: - if (filedValues instanceof ByteBuffer) { - fields[fieldIndex] = filedValues; + private void queryIteratorData( + TablePath tablePath, + String partitionName, + TableSchema tableSchema, + Collector output, + long batchSize) + throws InterruptedException { + try { + MilvusSourceConverter sourceConverter = new MilvusSourceConverter(tableSchema); + + QueryIteratorParam.Builder param = + QueryIteratorParam.newBuilder() + .withDatabaseName(tablePath.getDatabaseName()) + .withCollectionName(tablePath.getTableName()) + .withOutFields(Lists.newArrayList("*")) + .withBatchSize(batchSize); + + if (StringUtils.isNotEmpty(partitionName)) { + param.withPartitionNames(Collections.singletonList(partitionName)); + } + + R response = client.queryIterator(param.build()); + if (response.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.SERVER_RESPONSE_FAILED, response.getException()); + } + int maxFailRetry = 3; + QueryIterator iterator = response.getData(); + while (maxFailRetry > 0) { + try { + List next = iterator.next(); + if (next == null || next.isEmpty()) { break; } else { - throw new MilvusConnectorException( - CommonErrorCode.UNSUPPORTED_DATA_TYPE, - "Unexpected vector value: " + filedValues); + for (QueryResultsWrapper.RowRecord record : next) { + SeaTunnelRow seaTunnelRow = + sourceConverter.convertToSeaTunnelRow( + record, tableSchema, tablePath); + if (StringUtils.isNotEmpty(partitionName)) { + Map options = new HashMap<>(); + options.put(CommonOptions.PARTITION.getName(), partitionName); + seaTunnelRow.setOptions(options); + } + output.collect(seaTunnelRow); + } } - case SPARSE_FLOAT_VECTOR: - if (filedValues instanceof Map) { - fields[fieldIndex] = filedValues; - break; + } catch (Exception e) { + if (e.getMessage().contains("rate limit exceeded")) { + // for rateLimit, we can try iterator again after 30s, no need to update + // batch size directly + maxFailRetry--; + if (maxFailRetry == 0) { + log.error( + "Iterate next data from milvus failed, batchSize = {}, throw exception", + batchSize, + e); + throw new MilvusConnectorException( + MilvusConnectionErrorCode.READ_DATA_FAIL, e); + } + log.error( + "Iterate next data from milvus failed, batchSize = {}, will retry after 30 s, maxRetry: {}", + batchSize, + maxFailRetry, + e); + Thread.sleep(30000); } else { + // if this error, we need to reduce batch size and try again, so throw + // exception here throw new MilvusConnectorException( - CommonErrorCode.UNSUPPORTED_DATA_TYPE, - "Unexpected vector value: " + filedValues); + MilvusConnectionErrorCode.READ_DATA_FAIL, e); } - default: - throw new MilvusConnectorException( - CommonErrorCode.UNSUPPORTED_DATA_TYPE, - "Unexpected value: " + seaTunnelDataType.getSqlType().name()); + } + } + } catch (Exception e) { + if (e.getMessage().contains("rate limit exceeded") && batchSize > 10) { + log.error( + "Query Iterate data from milvus failed, retry from beginning with smaller batch size: {} after 30 s", + batchSize / 2, + e); + Thread.sleep(30000); + queryIteratorData(tablePath, partitionName, tableSchema, output, batchSize / 2); + } else { + throw new 
MilvusConnectorException(MilvusConnectionErrorCode.READ_DATA_FAIL, e); } } - - SeaTunnelRow seaTunnelRow = new SeaTunnelRow(fields); - seaTunnelRow.setTableId(tablePath.getFullName()); - seaTunnelRow.setRowKind(RowKind.INSERT); - return seaTunnelRow; } @Override @@ -268,7 +297,7 @@ public List snapshotState(long checkpointId) throws Exception @Override public void addSplits(List splits) { - log.info("Adding milvus splits to reader: {}", splits); + log.info("Adding milvus splits to reader: " + splits); pendingSplits.addAll(splits); } diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplit.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplit.java index e79d74b6dc0..d448242d9aa 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplit.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplit.java @@ -29,6 +29,7 @@ public class MilvusSourceSplit implements SourceSplit { private TablePath tablePath; private String splitId; + private String partitionName; @Override public String splitId() { diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplitEnumertor.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplitEnumertor.java index e01e9c8ad5d..1c181baffc1 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplitEnumertor.java +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/source/MilvusSourceSplitEnumertor.java @@ -22,8 +22,19 @@ import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; +import io.milvus.client.MilvusClient; +import io.milvus.client.MilvusServiceClient; +import io.milvus.grpc.DescribeCollectionResponse; +import io.milvus.grpc.FieldSchema; +import io.milvus.grpc.ShowPartitionsResponse; +import io.milvus.param.ConnectParam; +import io.milvus.param.R; +import io.milvus.param.collection.DescribeCollectionParam; +import io.milvus.param.partition.ShowPartitionsParam; import lombok.extern.slf4j.Slf4j; import java.io.IOException; @@ -45,8 +56,9 @@ public class MilvusSourceSplitEnumertor private final ConcurrentLinkedQueue pendingTables; private final Map> pendingSplits; private final Object stateLock = new Object(); + private MilvusClient client = null; - private ReadonlyConfig config; + private final ReadonlyConfig config; public MilvusSourceSplitEnumertor( Context context, @@ -66,7 +78,14 @@ public MilvusSourceSplitEnumertor( } @Override - public void open() {} + public void open() { + ConnectParam connectParam = + ConnectParam.newBuilder() + .withUri(config.get(MilvusSourceConfig.URL)) + .withToken(config.get(MilvusSourceConfig.TOKEN)) + .build(); + 
this.client = new MilvusServiceClient(connectParam); + } @Override public void run() throws Exception { @@ -92,17 +111,56 @@ public void run() throws Exception { } private Collection generateSplits(CatalogTable table) { - log.info("Start splitting table {} into chunks...", table.getTablePath()); - MilvusSourceSplit milvusSourceSplit = - MilvusSourceSplit.builder() - .splitId(createSplitId(table.getTablePath(), 0)) - .tablePath(table.getTablePath()) - .build(); - - return Collections.singletonList(milvusSourceSplit); + log.info("Start splitting table {} into chunks by partition...", table.getTablePath()); + String database = table.getTablePath().getDatabaseName(); + String collection = table.getTablePath().getTableName(); + R describeCollectionResponseR = + client.describeCollection( + DescribeCollectionParam.newBuilder() + .withDatabaseName(database) + .withCollectionName(collection) + .build()); + boolean hasPartitionKey = + describeCollectionResponseR.getData().getSchema().getFieldsList().stream() + .anyMatch(FieldSchema::getIsPartitionKey); + List milvusSourceSplits = new ArrayList<>(); + if (!hasPartitionKey) { + ShowPartitionsParam showPartitionsParam = + ShowPartitionsParam.newBuilder() + .withDatabaseName(database) + .withCollectionName(collection) + .build(); + R showPartitionsResponseR = + client.showPartitions(showPartitionsParam); + if (showPartitionsResponseR.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.LIST_PARTITIONS_FAILED, + "Failed to show partitions: " + showPartitionsResponseR.getMessage()); + } + List partitionList = showPartitionsResponseR.getData().getPartitionNamesList(); + for (String partitionName : partitionList) { + MilvusSourceSplit milvusSourceSplit = + MilvusSourceSplit.builder() + .tablePath(table.getTablePath()) + .splitId(createSplitId(table.getTablePath(), partitionName)) + .partitionName(partitionName) + .build(); + log.info("Generated split: {}", milvusSourceSplit); + milvusSourceSplits.add(milvusSourceSplit); + } + } else { + MilvusSourceSplit milvusSourceSplit = + MilvusSourceSplit.builder() + .tablePath(table.getTablePath()) + .splitId(createSplitId(table.getTablePath(), "0")) + .build(); + log.info("Generated split: {}", milvusSourceSplit); + milvusSourceSplits.add(milvusSourceSplit); + } + return milvusSourceSplits; } - protected String createSplitId(TablePath tablePath, int index) { + protected String createSplitId(TablePath tablePath, String index) { return String.format("%s-%s", tablePath, index); } @@ -133,7 +191,11 @@ private void assignSplit(Collection readers) { } @Override - public void close() throws IOException {} + public void close() throws IOException { + if (client != null) { + client.close(); + } + } @Override public void addSplitsBack(List splits, int subtaskId) { diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/MilvusConnectorUtils.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/MilvusConnectorUtils.java new file mode 100644 index 00000000000..e9b762f168c --- /dev/null +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/MilvusConnectorUtils.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.milvus.utils; + +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.type.CommonOptions; + +import io.milvus.v2.client.MilvusClientV2; +import io.milvus.v2.service.collection.request.CreateCollectionReq; +import io.milvus.v2.service.collection.request.DescribeCollectionReq; +import io.milvus.v2.service.collection.response.DescribeCollectionResp; +import lombok.extern.slf4j.Slf4j; + +import java.util.ArrayList; +import java.util.List; + +@Slf4j +public class MilvusConnectorUtils { + + public static Boolean hasPartitionKey(MilvusClientV2 milvusClient, String collectionName) { + + DescribeCollectionResp describeCollectionResp = + milvusClient.describeCollection( + DescribeCollectionReq.builder().collectionName(collectionName).build()); + return describeCollectionResp.getCollectionSchema().getFieldSchemaList().stream() + .anyMatch(CreateCollectionReq.FieldSchema::getIsPartitionKey); + } + + public static String getDynamicField(CatalogTable catalogTable) { + List<Column> columns = catalogTable.getTableSchema().getColumns(); + Column dynamicField = null; + for (Column column : columns) { + if (column.getOptions() != null + && (Boolean) + column.getOptions() + .getOrDefault(CommonOptions.METADATA.getName(), false)) { + // remember the dynamic (metadata) field + dynamicField = column; + } + } + return dynamicField == null ? null : dynamicField.getName(); + } + + public static List<String> getJsonField(CatalogTable catalogTable) { + List<Column> columns = catalogTable.getTableSchema().getColumns(); + List<String> jsonColumn = new ArrayList<>(); + for (Column column : columns) { + if (column.getOptions() != null + && column.getOptions().containsKey(CommonOptions.JSON.getName()) + && (Boolean) column.getOptions().get(CommonOptions.JSON.getName())) { + // collect the json fields + jsonColumn.add(column.getName()); + } + } + return jsonColumn; + } +} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/MilvusConvertUtils.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/MilvusConvertUtils.java new file mode 100644 index 00000000000..8c8d9b616ab --- /dev/null +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/MilvusConvertUtils.java @@ -0,0 +1,279 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.milvus.utils; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.ConstraintKey; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.catalog.VectorIndex; +import org.apache.seatunnel.api.table.type.CommonOptions; +import org.apache.seatunnel.connectors.seatunnel.milvus.catalog.MilvusOptions; +import org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSourceConfig; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; +import org.apache.seatunnel.connectors.seatunnel.milvus.utils.source.MilvusSourceConverter; + +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.util.Lists; + +import com.google.protobuf.ProtocolStringList; +import io.milvus.client.MilvusServiceClient; +import io.milvus.grpc.CollectionSchema; +import io.milvus.grpc.DescribeCollectionResponse; +import io.milvus.grpc.DescribeIndexResponse; +import io.milvus.grpc.FieldSchema; +import io.milvus.grpc.IndexDescription; +import io.milvus.grpc.KeyValuePair; +import io.milvus.grpc.ShowCollectionsResponse; +import io.milvus.grpc.ShowPartitionsResponse; +import io.milvus.grpc.ShowType; +import io.milvus.param.ConnectParam; +import io.milvus.param.R; +import io.milvus.param.collection.DescribeCollectionParam; +import io.milvus.param.collection.ShowCollectionsParam; +import io.milvus.param.index.DescribeIndexParam; +import io.milvus.param.partition.ShowPartitionsParam; +import lombok.extern.slf4j.Slf4j; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.seatunnel.api.table.type.BasicType.STRING_TYPE; + +@Slf4j +public class MilvusConvertUtils { + private final ReadonlyConfig config; + + public MilvusConvertUtils(ReadonlyConfig config) { + this.config = config; + } + + public Map getSourceTables() { + MilvusServiceClient client = + new MilvusServiceClient( + ConnectParam.newBuilder() + .withUri(config.get(MilvusSourceConfig.URL)) + .withToken(config.get(MilvusSourceConfig.TOKEN)) + .build()); + + String database = config.get(MilvusSourceConfig.DATABASE); + List collectionList = new ArrayList<>(); + if (StringUtils.isNotEmpty(config.get(MilvusSourceConfig.COLLECTION))) { + 
collectionList.add(config.get(MilvusSourceConfig.COLLECTION)); + } else { + R response = + client.showCollections( + ShowCollectionsParam.newBuilder() + .withDatabaseName(database) + .withShowType(ShowType.All) + .build()); + if (response.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.SHOW_COLLECTIONS_ERROR); + } + + ProtocolStringList collections = response.getData().getCollectionNamesList(); + if (CollectionUtils.isEmpty(collections)) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.DATABASE_NO_COLLECTIONS, database); + } + collectionList.addAll(collections); + } + + Map map = new HashMap<>(); + for (String collection : collectionList) { + CatalogTable catalogTable = getCatalogTable(client, database, collection); + TablePath tablePath = TablePath.of(database, null, collection); + map.put(tablePath, catalogTable); + } + client.close(); + return map; + } + + public CatalogTable getCatalogTable( + MilvusServiceClient client, String database, String collection) { + R response = + client.describeCollection( + DescribeCollectionParam.newBuilder() + .withDatabaseName(database) + .withCollectionName(collection) + .build()); + + if (response.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.DESC_COLLECTION_ERROR, response.getMessage()); + } + log.info( + "describe collection database: {}, collection: {}, response: {}", + database, + collection, + response); + // collection column + DescribeCollectionResponse collectionResponse = response.getData(); + CollectionSchema schema = collectionResponse.getSchema(); + List columns = new ArrayList<>(); + boolean existPartitionKeyField = false; + String partitionKeyField = null; + for (FieldSchema fieldSchema : schema.getFieldsList()) { + PhysicalColumn physicalColumn = MilvusSourceConverter.convertColumn(fieldSchema); + columns.add(physicalColumn); + if (fieldSchema.getIsPartitionKey()) { + existPartitionKeyField = true; + partitionKeyField = fieldSchema.getName(); + } + } + if (collectionResponse.getSchema().getEnableDynamicField()) { + Map options = new HashMap<>(); + + options.put(CommonOptions.METADATA.getName(), true); + PhysicalColumn dynamicColumn = + PhysicalColumn.builder() + .name(CommonOptions.METADATA.getName()) + .dataType(STRING_TYPE) + .options(options) + .build(); + columns.add(dynamicColumn); + } + + // primary key + PrimaryKey primaryKey = buildPrimaryKey(schema.getFieldsList()); + + // index + R describeIndexResponseR = + client.describeIndex( + DescribeIndexParam.newBuilder() + .withDatabaseName(database) + .withCollectionName(collection) + .build()); + if (describeIndexResponseR.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException(MilvusConnectionErrorCode.DESC_INDEX_ERROR); + } + DescribeIndexResponse indexResponse = describeIndexResponseR.getData(); + List vectorIndexes = buildVectorIndexes(indexResponse); + + // build tableSchema + TableSchema tableSchema = + TableSchema.builder() + .columns(columns) + .primaryKey(primaryKey) + .constraintKey( + ConstraintKey.of( + ConstraintKey.ConstraintType.VECTOR_INDEX_KEY, + "vector_index", + vectorIndexes)) + .build(); + + // build tableId + String CATALOG_NAME = "Milvus"; + TableIdentifier tableId = TableIdentifier.of(CATALOG_NAME, database, null, collection); + // build options info + Map options = new HashMap<>(); + options.put( + MilvusOptions.ENABLE_DYNAMIC_FIELD, String.valueOf(schema.getEnableDynamicField())); + 
options.put(MilvusOptions.SHARDS_NUM, String.valueOf(collectionResponse.getShardsNum())); + if (existPartitionKeyField) { + options.put(MilvusOptions.PARTITION_KEY_FIELD, partitionKeyField); + } else { + fillPartitionNames(options, client, database, collection); + } + + return CatalogTable.of( + tableId, tableSchema, options, new ArrayList<>(), schema.getDescription()); + } + + private static void fillPartitionNames( + Map<String, String> options, + MilvusServiceClient client, + String database, + String collection) { + // no partition key field exists, so record the partition names for per-partition reads + R<ShowPartitionsResponse> partitionsResponseR = + client.showPartitions( + ShowPartitionsParam.newBuilder() + .withDatabaseName(database) + .withCollectionName(collection) + .build()); + if (partitionsResponseR.getStatus() != R.Status.Success.getCode()) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.SHOW_PARTITION_ERROR, + partitionsResponseR.getMessage()); + } + + ProtocolStringList partitionNamesList = + partitionsResponseR.getData().getPartitionNamesList(); + List<String> list = new ArrayList<>(); + for (String partition : partitionNamesList) { + if (partition.equals("_default")) { + continue; + } + list.add(partition); + } + if (CollectionUtils.isEmpty(list)) { + return; + } + + options.put(MilvusOptions.PARTITION_NAMES, String.join(",", list)); + } + + private static List<VectorIndex> buildVectorIndexes( + DescribeIndexResponse indexResponse) { + if (CollectionUtils.isEmpty(indexResponse.getIndexDescriptionsList())) { + return null; + } + + List<VectorIndex> list = new ArrayList<>(); + for (IndexDescription per : indexResponse.getIndexDescriptionsList()) { + Map<String, String> paramsMap = + per.getParamsList().stream() + .collect( + Collectors.toMap(KeyValuePair::getKey, KeyValuePair::getValue)); + + VectorIndex index = + new VectorIndex( + per.getIndexName(), + per.getFieldName(), + paramsMap.get("index_type"), + paramsMap.get("metric_type")); + + list.add(index); + } + + return list; + } + + public static PrimaryKey buildPrimaryKey(List<FieldSchema> fields) { + for (FieldSchema field : fields) { + if (field.getIsPrimaryKey()) { + return PrimaryKey.of( + field.getName(), Lists.newArrayList(field.getName()), field.getAutoID()); + } + } + + return null; + } +} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/sink/MilvusSinkConverter.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/sink/MilvusSinkConverter.java new file mode 100644 index 00000000000..0ca373468c4 --- /dev/null +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/sink/MilvusSinkConverter.java @@ -0,0 +1,294 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.seatunnel.connectors.seatunnel.milvus.utils.sink; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.exception.CatalogException; +import org.apache.seatunnel.api.table.type.ArrayType; +import org.apache.seatunnel.api.table.type.CommonOptions; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.api.table.type.SqlType; +import org.apache.seatunnel.common.utils.BufferUtils; +import org.apache.seatunnel.common.utils.JsonUtils; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectionErrorCode; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; + +import org.apache.commons.lang3.StringUtils; + +import com.google.gson.Gson; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import io.milvus.common.utils.JacksonUtils; +import io.milvus.grpc.DataType; +import io.milvus.param.collection.FieldType; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import static org.apache.seatunnel.api.table.catalog.PrimaryKey.isPrimaryKeyField; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.ENABLE_AUTO_ID; +import static org.apache.seatunnel.connectors.seatunnel.milvus.config.MilvusSinkConfig.ENABLE_DYNAMIC_FIELD; + +public class MilvusSinkConverter { + private static final Gson gson = new Gson(); + + public Object convertBySeaTunnelType( + SeaTunnelDataType fieldType, Boolean isJson, Object value) { + SqlType sqlType = fieldType.getSqlType(); + switch (sqlType) { + case INT: + return Integer.parseInt(value.toString()); + case TINYINT: + return Byte.parseByte(value.toString()); + case BIGINT: + return Long.parseLong(value.toString()); + case SMALLINT: + return Short.parseShort(value.toString()); + case STRING: + case DATE: + if (isJson) { + return gson.fromJson(value.toString(), JsonObject.class); + } + return value.toString(); + case FLOAT_VECTOR: + ByteBuffer floatVectorBuffer = (ByteBuffer) value; + Float[] floats = BufferUtils.toFloatArray(floatVectorBuffer); + return Arrays.stream(floats).collect(Collectors.toList()); + case BINARY_VECTOR: + case BFLOAT16_VECTOR: + case FLOAT16_VECTOR: + ByteBuffer binaryVector = (ByteBuffer) value; + return gson.toJsonTree(binaryVector.array()); + case SPARSE_FLOAT_VECTOR: + return JsonParser.parseString(JacksonUtils.toJsonString(value)).getAsJsonObject(); + case FLOAT: + return Float.parseFloat(value.toString()); + case BOOLEAN: + return Boolean.parseBoolean(value.toString()); + case DOUBLE: + return Double.parseDouble(value.toString()); + case ARRAY: + ArrayType arrayType = (ArrayType) fieldType; + switch (arrayType.getElementType().getSqlType()) { + case STRING: + String[] stringArray = (String[]) value; + return Arrays.asList(stringArray); + case SMALLINT: + Short[] shortArray = (Short[]) value; + return Arrays.asList(shortArray); + case TINYINT: + Byte[] byteArray = (Byte[]) value; + return Arrays.asList(byteArray); + case INT: + Integer[] intArray = (Integer[]) value; + return Arrays.asList(intArray); + case BIGINT: + Long[] 
longArray = (Long[]) value; + return Arrays.asList(longArray); + case FLOAT: + Float[] floatArray = (Float[]) value; + return Arrays.asList(floatArray); + case DOUBLE: + Double[] doubleArray = (Double[]) value; + return Arrays.asList(doubleArray); + } + case ROW: + SeaTunnelRow row = (SeaTunnelRow) value; + return JsonUtils.toJsonString(row.getFields()); + case MAP: + return JacksonUtils.toJsonString(value); + default: + throw new MilvusConnectorException( + MilvusConnectionErrorCode.NOT_SUPPORT_TYPE, sqlType.name()); + } + } + + public static FieldType convertToFieldType( + Column column, PrimaryKey primaryKey, String partitionKeyField, Boolean autoId) { + SeaTunnelDataType seaTunnelDataType = column.getDataType(); + DataType milvusDataType = convertSqlTypeToDataType(seaTunnelDataType.getSqlType()); + FieldType.Builder build = + FieldType.newBuilder().withName(column.getName()).withDataType(milvusDataType); + if (StringUtils.isNotEmpty(column.getComment())) { + build.withDescription(column.getComment()); + } + switch (seaTunnelDataType.getSqlType()) { + case ROW: + build.withMaxLength(65535); + break; + case DATE: + build.withMaxLength(20); + break; + case STRING: + if (column.getOptions() != null + && column.getOptions().get(CommonOptions.JSON.getName()) != null + && (Boolean) column.getOptions().get(CommonOptions.JSON.getName())) { + // check if is json + build.withDataType(DataType.JSON); + } else if (column.getColumnLength() == null || column.getColumnLength() == 0) { + build.withMaxLength(65535); + } else { + build.withMaxLength((int) (column.getColumnLength() / 4)); + } + break; + case ARRAY: + ArrayType arrayType = (ArrayType) column.getDataType(); + SeaTunnelDataType elementType = arrayType.getElementType(); + build.withElementType(convertSqlTypeToDataType(elementType.getSqlType())); + build.withMaxCapacity(4095); + switch (elementType.getSqlType()) { + case STRING: + if (column.getColumnLength() == null || column.getColumnLength() == 0) { + build.withMaxLength(65535); + } else { + build.withMaxLength((int) (column.getColumnLength() / 4)); + } + break; + } + break; + case BINARY_VECTOR: + case FLOAT_VECTOR: + case FLOAT16_VECTOR: + case BFLOAT16_VECTOR: + build.withDimension(column.getScale()); + break; + } + + // check is primaryKey + if (null != primaryKey && primaryKey.getColumnNames().contains(column.getName())) { + build.withPrimaryKey(true); + List integerTypes = new ArrayList<>(); + integerTypes.add(SqlType.INT); + integerTypes.add(SqlType.SMALLINT); + integerTypes.add(SqlType.TINYINT); + integerTypes.add(SqlType.BIGINT); + if (integerTypes.contains(seaTunnelDataType.getSqlType())) { + build.withDataType(DataType.Int64); + } else { + build.withDataType(DataType.VarChar); + build.withMaxLength(65535); + } + if (null != primaryKey.getEnableAutoId()) { + build.withAutoID(primaryKey.getEnableAutoId()); + } else { + build.withAutoID(autoId); + } + } + + // check is partitionKey + if (column.getName().equals(partitionKeyField)) { + build.withPartitionKey(true); + } + + return build.build(); + } + + public static DataType convertSqlTypeToDataType(SqlType sqlType) { + switch (sqlType) { + case BOOLEAN: + return DataType.Bool; + case TINYINT: + return DataType.Int8; + case SMALLINT: + return DataType.Int16; + case INT: + return DataType.Int32; + case BIGINT: + return DataType.Int64; + case FLOAT: + return DataType.Float; + case DOUBLE: + return DataType.Double; + case STRING: + return DataType.VarChar; + case ARRAY: + return DataType.Array; + case MAP: + return DataType.JSON; + case 
FLOAT_VECTOR: + return DataType.FloatVector; + case BINARY_VECTOR: + return DataType.BinaryVector; + case FLOAT16_VECTOR: + return DataType.Float16Vector; + case BFLOAT16_VECTOR: + return DataType.BFloat16Vector; + case SPARSE_FLOAT_VECTOR: + return DataType.SparseFloatVector; + case DATE: + return DataType.VarChar; + case ROW: + return DataType.VarChar; + } + throw new CatalogException( + String.format("Not support convert to milvus type, sqlType is %s", sqlType)); + } + + public JsonObject buildMilvusData( + CatalogTable catalogTable, + ReadonlyConfig config, + List jsonFields, + String dynamicField, + SeaTunnelRow element) { + SeaTunnelRowType seaTunnelRowType = catalogTable.getSeaTunnelRowType(); + PrimaryKey primaryKey = catalogTable.getTableSchema().getPrimaryKey(); + Boolean autoId = config.get(ENABLE_AUTO_ID); + + JsonObject data = new JsonObject(); + Gson gson = new Gson(); + for (int i = 0; i < seaTunnelRowType.getFieldNames().length; i++) { + String fieldName = seaTunnelRowType.getFieldNames()[i]; + Boolean isJson = jsonFields.contains(fieldName); + if (autoId && isPrimaryKeyField(primaryKey, fieldName)) { + continue; // if create table open AutoId, then don't need insert data with + // primaryKey field. + } + + SeaTunnelDataType fieldType = seaTunnelRowType.getFieldType(i); + Object value = element.getField(i); + if (null == value) { + throw new MilvusConnectorException( + MilvusConnectionErrorCode.FIELD_IS_NULL, fieldName); + } + // if the field is dynamic field, then parse the dynamic field + if (dynamicField != null + && dynamicField.equals(fieldName) + && config.get(ENABLE_DYNAMIC_FIELD)) { + JsonObject dynamicData = gson.fromJson(value.toString(), JsonObject.class); + dynamicData + .entrySet() + .forEach( + entry -> { + data.add(entry.getKey(), entry.getValue()); + }); + continue; + } + Object object = convertBySeaTunnelType(fieldType, isJson, value); + data.add(fieldName, gson.toJsonTree(object)); + } + return data; + } +} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/source/MilvusSourceConverter.java b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/source/MilvusSourceConverter.java new file mode 100644 index 00000000000..ff456a955af --- /dev/null +++ b/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/utils/source/MilvusSourceConverter.java @@ -0,0 +1,364 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.milvus.utils.source; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.ArrayType; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.CommonOptions; +import org.apache.seatunnel.api.table.type.RowKind; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.api.table.type.SqlType; +import org.apache.seatunnel.api.table.type.VectorType; +import org.apache.seatunnel.common.exception.CommonErrorCode; +import org.apache.seatunnel.common.utils.BufferUtils; +import org.apache.seatunnel.connectors.seatunnel.milvus.exception.MilvusConnectorException; + +import com.google.gson.Gson; +import com.google.gson.JsonObject; +import io.milvus.grpc.DataType; +import io.milvus.grpc.FieldSchema; +import io.milvus.grpc.KeyValuePair; +import io.milvus.response.QueryResultsWrapper; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.seatunnel.api.table.type.BasicType.STRING_TYPE; + +public class MilvusSourceConverter { + private final List<String> existField; + private Gson gson = new Gson(); + + public MilvusSourceConverter(TableSchema tableSchema) { + this.existField = + tableSchema.getColumns().stream() + .filter( + column -> + column.getOptions() == null + || !column.getOptions() + .containsValue(CommonOptions.METADATA)) + .map(Column::getName) + .collect(Collectors.toList()); + } + + public SeaTunnelRow convertToSeaTunnelRow( + QueryResultsWrapper.RowRecord record, TableSchema tableSchema, TablePath tablePath) { + // get field names and types + SeaTunnelRowType typeInfo = tableSchema.toPhysicalRowDataType(); + String[] fieldNames = typeInfo.getFieldNames(); + + Object[] seatunnelField = new Object[typeInfo.getTotalFields()]; + // get field values from source milvus + Map<String, Object> fieldValuesMap = record.getFieldValues(); + // filter dynamic field + JsonObject dynamicField = convertDynamicField(fieldValuesMap); + + for (int fieldIndex = 0; fieldIndex < typeInfo.getTotalFields(); fieldIndex++) { + if (fieldNames[fieldIndex].equals(CommonOptions.METADATA.getName())) { + seatunnelField[fieldIndex] = dynamicField.toString(); + continue; + } + SeaTunnelDataType<?> seaTunnelDataType = typeInfo.getFieldType(fieldIndex); + Object filedValues = fieldValuesMap.get(fieldNames[fieldIndex]); + switch (seaTunnelDataType.getSqlType()) { + case STRING: + seatunnelField[fieldIndex] = filedValues.toString(); + break; + case BOOLEAN: + if (filedValues instanceof Boolean) { + seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Boolean.valueOf(filedValues.toString()); + } + break; + case TINYINT: + if (filedValues instanceof Byte) { + seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Byte.parseByte(filedValues.toString()); + } + break; + case SMALLINT: + if (filedValues instanceof Short) { + seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Short.parseShort(filedValues.toString()); + } + break; + case INT: + if (filedValues instanceof Integer) { +
seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Integer.valueOf(filedValues.toString()); + } + break; + case BIGINT: + if (filedValues instanceof Long) { + seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Long.parseLong(filedValues.toString()); + } + break; + case FLOAT: + if (filedValues instanceof Float) { + seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Float.parseFloat(filedValues.toString()); + } + break; + case DOUBLE: + if (filedValues instanceof Double) { + seatunnelField[fieldIndex] = filedValues; + } else { + seatunnelField[fieldIndex] = Double.parseDouble(filedValues.toString()); + } + break; + case ARRAY: + if (filedValues instanceof List) { + List list = (List) filedValues; + ArrayType arrayType = (ArrayType) seaTunnelDataType; + SqlType elementType = arrayType.getElementType().getSqlType(); + switch (elementType) { + case STRING: + String[] arrays = new String[list.size()]; + for (int i = 0; i < list.size(); i++) { + arrays[i] = list.get(i).toString(); + } + seatunnelField[fieldIndex] = arrays; + break; + case BOOLEAN: + Boolean[] booleanArrays = new Boolean[list.size()]; + for (int i = 0; i < list.size(); i++) { + booleanArrays[i] = Boolean.valueOf(list.get(i).toString()); + } + seatunnelField[fieldIndex] = booleanArrays; + break; + case TINYINT: + Byte[] byteArrays = new Byte[list.size()]; + for (int i = 0; i < list.size(); i++) { + byteArrays[i] = Byte.parseByte(list.get(i).toString()); + } + seatunnelField[fieldIndex] = byteArrays; + break; + case SMALLINT: + Short[] shortArrays = new Short[list.size()]; + for (int i = 0; i < list.size(); i++) { + shortArrays[i] = Short.parseShort(list.get(i).toString()); + } + seatunnelField[fieldIndex] = shortArrays; + break; + case INT: + Integer[] intArrays = new Integer[list.size()]; + for (int i = 0; i < list.size(); i++) { + intArrays[i] = Integer.valueOf(list.get(i).toString()); + } + seatunnelField[fieldIndex] = intArrays; + break; + case BIGINT: + Long[] longArrays = new Long[list.size()]; + for (int i = 0; i < list.size(); i++) { + longArrays[i] = Long.parseLong(list.get(i).toString()); + } + seatunnelField[fieldIndex] = longArrays; + break; + case FLOAT: + Float[] floatArrays = new Float[list.size()]; + for (int i = 0; i < list.size(); i++) { + floatArrays[i] = Float.parseFloat(list.get(i).toString()); + } + seatunnelField[fieldIndex] = floatArrays; + break; + case DOUBLE: + Double[] doubleArrays = new Double[list.size()]; + for (int i = 0; i < list.size(); i++) { + doubleArrays[i] = Double.parseDouble(list.get(i).toString()); + } + seatunnelField[fieldIndex] = doubleArrays; + break; + default: + throw new MilvusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unexpected array value: " + filedValues); + } + } else { + throw new MilvusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unexpected array value: " + filedValues); + } + break; + case FLOAT_VECTOR: + if (filedValues instanceof List) { + List list = (List) filedValues; + Float[] arrays = new Float[list.size()]; + for (int i = 0; i < list.size(); i++) { + arrays[i] = Float.parseFloat(list.get(i).toString()); + } + seatunnelField[fieldIndex] = BufferUtils.toByteBuffer(arrays); + break; + } else { + throw new MilvusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unexpected vector value: " + filedValues); + } + case BINARY_VECTOR: + case FLOAT16_VECTOR: + case BFLOAT16_VECTOR: + if (filedValues instanceof ByteBuffer) { + 
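+                        // binary-family vectors already arrive as a ByteBuffer from the Milvus
+                        // client, so they can be stored without further conversion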
seatunnelField[fieldIndex] = filedValues; + break; + } else { + throw new MilvusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unexpected vector value: " + filedValues); + } + case SPARSE_FLOAT_VECTOR: + if (filedValues instanceof Map) { + seatunnelField[fieldIndex] = filedValues; + break; + } else { + throw new MilvusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unexpected vector value: " + filedValues); + } + default: + throw new MilvusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unexpected value: " + seaTunnelDataType.getSqlType().name()); + } + } + + SeaTunnelRow seaTunnelRow = new SeaTunnelRow(seatunnelField); + seaTunnelRow.setTableId(tablePath.getFullName()); + seaTunnelRow.setRowKind(RowKind.INSERT); + return seaTunnelRow; + } + + public static PhysicalColumn convertColumn(FieldSchema fieldSchema) { + DataType dataType = fieldSchema.getDataType(); + PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder(); + builder.name(fieldSchema.getName()); + builder.sourceType(dataType.name()); + builder.comment(fieldSchema.getDescription()); + + switch (dataType) { + case Bool: + builder.dataType(BasicType.BOOLEAN_TYPE); + break; + case Int8: + builder.dataType(BasicType.BYTE_TYPE); + break; + case Int16: + builder.dataType(BasicType.SHORT_TYPE); + break; + case Int32: + builder.dataType(BasicType.INT_TYPE); + break; + case Int64: + builder.dataType(BasicType.LONG_TYPE); + break; + case Float: + builder.dataType(BasicType.FLOAT_TYPE); + break; + case Double: + builder.dataType(BasicType.DOUBLE_TYPE); + break; + case VarChar: + builder.dataType(BasicType.STRING_TYPE); + for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { + if (keyValuePair.getKey().equals("max_length")) { + builder.columnLength(Long.parseLong(keyValuePair.getValue()) * 4); + break; + } + } + break; + case String: + builder.dataType(BasicType.STRING_TYPE); + break; + case JSON: + builder.dataType(STRING_TYPE); + Map options = new HashMap<>(); + options.put(CommonOptions.JSON.getName(), true); + builder.options(options); + break; + case Array: + builder.dataType(ArrayType.STRING_ARRAY_TYPE); + break; + case FloatVector: + builder.dataType(VectorType.VECTOR_FLOAT_TYPE); + for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { + if (keyValuePair.getKey().equals("dim")) { + builder.scale(Integer.valueOf(keyValuePair.getValue())); + break; + } + } + break; + case BinaryVector: + builder.dataType(VectorType.VECTOR_BINARY_TYPE); + for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { + if (keyValuePair.getKey().equals("dim")) { + builder.scale(Integer.valueOf(keyValuePair.getValue())); + break; + } + } + break; + case SparseFloatVector: + builder.dataType(VectorType.VECTOR_SPARSE_FLOAT_TYPE); + break; + case Float16Vector: + builder.dataType(VectorType.VECTOR_FLOAT16_TYPE); + for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { + if (keyValuePair.getKey().equals("dim")) { + builder.scale(Integer.valueOf(keyValuePair.getValue())); + break; + } + } + break; + case BFloat16Vector: + builder.dataType(VectorType.VECTOR_BFLOAT16_TYPE); + for (KeyValuePair keyValuePair : fieldSchema.getTypeParamsList()) { + if (keyValuePair.getKey().equals("dim")) { + builder.scale(Integer.valueOf(keyValuePair.getValue())); + break; + } + } + break; + default: + throw new UnsupportedOperationException("Unsupported data type: " + dataType); + } + + return builder.build(); + } + + private JsonObject convertDynamicField(Map 
fieldValuesMap) { + JsonObject dynamicField = new JsonObject(); + for (Map.Entry entry : fieldValuesMap.entrySet()) { + if (!existField.contains(entry.getKey())) { + dynamicField.add(entry.getKey(), gson.toJsonTree(entry.getValue())); + } + } + return dynamicField; + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java index 9b358a2e8c4..87766ff96b0 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java @@ -23,16 +23,22 @@ import org.apache.seatunnel.api.sink.DataSaveMode; import org.apache.seatunnel.api.sink.SchemaSaveMode; +import org.apache.paimon.CoreOptions; + import lombok.Getter; import lombok.extern.slf4j.Slf4j; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Stream; @Getter @Slf4j public class PaimonSinkConfig extends PaimonConfig { + + public static final String CHANGELOG_TMP_PATH = "changelog-tmp-path"; + public static final Option SCHEMA_SAVE_MODE = Options.key("schema_save_mode") .enumType(SchemaSaveMode.class) @@ -44,7 +50,6 @@ public class PaimonSinkConfig extends PaimonConfig { .enumType(DataSaveMode.class) .defaultValue(DataSaveMode.APPEND_DATA) .withDescription("data_save_mode"); - public static final Option PRIMARY_KEYS = Options.key("paimon.table.primary-keys") .stringType() @@ -66,11 +71,13 @@ public class PaimonSinkConfig extends PaimonConfig { .withDescription( "Properties passed through to paimon table initialization, such as 'file.format', 'bucket'(org.apache.paimon.CoreOptions)"); - private SchemaSaveMode schemaSaveMode; - private DataSaveMode dataSaveMode; - private List primaryKeys; - private List partitionKeys; - private Map writeProps; + private final SchemaSaveMode schemaSaveMode; + private final DataSaveMode dataSaveMode; + private final CoreOptions.ChangelogProducer changelogProducer; + private final String changelogTmpPath; + private final List primaryKeys; + private final List partitionKeys; + private final Map writeProps; public PaimonSinkConfig(ReadonlyConfig readonlyConfig) { super(readonlyConfig); @@ -79,6 +86,20 @@ public PaimonSinkConfig(ReadonlyConfig readonlyConfig) { this.primaryKeys = stringToList(readonlyConfig.get(PRIMARY_KEYS), ","); this.partitionKeys = stringToList(readonlyConfig.get(PARTITION_KEYS), ","); this.writeProps = readonlyConfig.get(WRITE_PROPS); + this.changelogProducer = + Stream.of(CoreOptions.ChangelogProducer.values()) + .filter( + cp -> + cp.toString() + .equalsIgnoreCase( + writeProps.getOrDefault( + CoreOptions.CHANGELOG_PRODUCER + .key(), + ""))) + .findFirst() + .orElse(null); + this.changelogTmpPath = + writeProps.getOrDefault(CHANGELOG_TMP_PATH, System.getProperty("java.io.tmpdir")); checkConfig(); } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java index 73d2151b896..86828c9a587 100644 --- 
a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java @@ -94,7 +94,12 @@ public String getPluginName() { @Override public PaimonSinkWriter createWriter(SinkWriter.Context context) throws IOException { return new PaimonSinkWriter( - context, table, seaTunnelRowType, jobContext, paimonHadoopConfiguration); + context, + table, + seaTunnelRowType, + jobContext, + paimonSinkConfig, + paimonHadoopConfiguration); } @Override @@ -108,7 +113,13 @@ public PaimonSinkWriter createWriter(SinkWriter.Context context) throws IOExcept public SinkWriter restoreWriter( SinkWriter.Context context, List states) throws IOException { return new PaimonSinkWriter( - context, table, seaTunnelRowType, states, jobContext, paimonHadoopConfiguration); + context, + table, + seaTunnelRowType, + states, + jobContext, + paimonSinkConfig, + paimonHadoopConfiguration); } @Override diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java index 7a3fe6d0336..e57e62c9814 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java @@ -24,6 +24,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.utils.SeaTunnelException; import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonHadoopConfiguration; +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; import org.apache.seatunnel.connectors.seatunnel.paimon.security.PaimonSecurityContext; @@ -33,7 +34,9 @@ import org.apache.seatunnel.connectors.seatunnel.paimon.utils.JobContextUtil; import org.apache.seatunnel.connectors.seatunnel.paimon.utils.RowConverter; +import org.apache.paimon.CoreOptions; import org.apache.paimon.data.InternalRow; +import org.apache.paimon.disk.IOManager; import org.apache.paimon.schema.TableSchema; import org.apache.paimon.table.BucketMode; import org.apache.paimon.table.FileStoreTable; @@ -58,6 +61,8 @@ import java.util.UUID; import java.util.stream.Collectors; +import static org.apache.paimon.disk.IOManagerImpl.splitPaths; + @Slf4j public class PaimonSinkWriter implements SinkWriter, @@ -65,14 +70,14 @@ public class PaimonSinkWriter private String commitUser = UUID.randomUUID().toString(); + private final FileStoreTable table; + private final WriteBuilder tableWriteBuilder; private final TableWrite tableWrite; private List committables = new ArrayList<>(); - private final Table table; - private final SeaTunnelRowType seaTunnelRowType; private final SinkWriter.Context context; @@ -90,18 +95,30 @@ public PaimonSinkWriter( Table table, SeaTunnelRowType seaTunnelRowType, JobContext jobContext, + PaimonSinkConfig paimonSinkConfig, PaimonHadoopConfiguration paimonHadoopConfiguration) { - this.table = table; + this.table = (FileStoreTable) table; + 
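+        // The table's existing option takes precedence: if the connector config requests a
+        // different changelog producer than the table was created with, warn and keep the
+        // table's setting.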
CoreOptions.ChangelogProducer changelogProducer =
+                this.table.coreOptions().changelogProducer();
+        if (Objects.nonNull(paimonSinkConfig.getChangelogProducer())
+                && changelogProducer != paimonSinkConfig.getChangelogProducer()) {
+            log.warn(
+                    "The configured 'changelog-producer' property conflicts with the table's existing option; the table's 'changelog-producer' will be used.");
+        }
+        String changelogTmpPath = paimonSinkConfig.getChangelogTmpPath();
         this.tableWriteBuilder =
                 JobContextUtil.isBatchJob(jobContext)
                         ? this.table.newBatchWriteBuilder()
                         : this.table.newStreamWriteBuilder();
-        this.tableWrite = tableWriteBuilder.newWrite();
+        this.tableWrite =
+                tableWriteBuilder
+                        .newWrite()
+                        .withIOManager(IOManager.create(splitPaths(changelogTmpPath)));
         this.seaTunnelRowType = seaTunnelRowType;
         this.context = context;
         this.jobContext = jobContext;
-        this.tableSchema = ((FileStoreTable) table).schema();
-        BucketMode bucketMode = ((FileStoreTable) table).bucketMode();
+        this.tableSchema = this.table.schema();
+        BucketMode bucketMode = this.table.bucketMode();
         this.dynamicBucket =
                 BucketMode.DYNAMIC == bucketMode || BucketMode.GLOBAL_DYNAMIC == bucketMode;
         int bucket = ((FileStoreTable) table).coreOptions().bucket();
@@ -124,12 +141,20 @@ public PaimonSinkWriter(
             SeaTunnelRowType seaTunnelRowType,
             List<PaimonSinkState> states,
             JobContext jobContext,
+            PaimonSinkConfig paimonSinkConfig,
             PaimonHadoopConfiguration paimonHadoopConfiguration) {
-        this(context, table, seaTunnelRowType, jobContext, paimonHadoopConfiguration);
+        this(
+                context,
+                table,
+                seaTunnelRowType,
+                jobContext,
+                paimonSinkConfig,
+                paimonHadoopConfiguration);
         if (Objects.isNull(states) || states.isEmpty()) {
             return;
         }
         this.commitUser = states.get(0).getCommitUser();
+        long checkpointId = states.get(0).getCheckpointId();
         try (TableCommit tableCommit = tableWriteBuilder.newCommit()) {
             List<CommitMessage> commitables =
                     states.stream()
@@ -142,7 +167,7 @@ public PaimonSinkWriter(
                 ((BatchTableCommit) tableCommit).commit(commitables);
             } else {
                 log.debug("Trying to recommit states streaming mode");
-                ((StreamTableCommit) tableCommit).commit(Objects.hash(commitables), commitables);
+                ((StreamTableCommit) tableCommit).commit(checkpointId, commitables);
             }
         } catch (Exception e) {
             throw new PaimonConnectorException(
@@ -174,20 +199,26 @@ public void write(SeaTunnelRow element) throws IOException {
     @Override
     public Optional<PaimonCommitInfo> prepareCommit() throws IOException {
+        return Optional.empty();
+    }
+
+    @Override
+    public Optional<PaimonCommitInfo> prepareCommit(long checkpointId) throws IOException {
         try {
             List<CommitMessage> fileCommittables;
             if (JobContextUtil.isBatchJob(jobContext)) {
                 fileCommittables = ((BatchTableWrite) tableWrite).prepareCommit();
             } else {
                 fileCommittables =
-                        ((StreamTableWrite) tableWrite).prepareCommit(false, committables.size());
+                        ((StreamTableWrite) tableWrite)
+                                .prepareCommit(waitCompaction(), checkpointId);
             }
             committables.addAll(fileCommittables);
-            return Optional.of(new PaimonCommitInfo(fileCommittables));
+            return Optional.of(new PaimonCommitInfo(fileCommittables, checkpointId));
         } catch (Exception e) {
             throw new PaimonConnectorException(
                     PaimonConnectorErrorCode.TABLE_PRE_COMMIT_FAILED,
-                    "Flink table store failed to prepare commit",
+                    "Paimon pre-commit failed.",
                     e);
         }
     }
@@ -218,4 +249,11 @@ public void close() throws IOException {
             committables.clear();
         }
     }
+
+    private boolean waitCompaction() {
+        CoreOptions.ChangelogProducer changelogProducer =
+                this.table.coreOptions().changelogProducer();
+        return changelogProducer == CoreOptions.ChangelogProducer.LOOKUP
+ || changelogProducer == CoreOptions.ChangelogProducer.FULL_COMPACTION; + } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitInfo.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitInfo.java index 8a7ad84a2e8..83ed71f6151 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitInfo.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitInfo.java @@ -24,6 +24,7 @@ import java.io.Serializable; import java.util.List; +import java.util.Map; /** Paimon connector aggregate commit information class. */ @Data @@ -32,5 +33,6 @@ public class PaimonAggregatedCommitInfo implements Serializable { private static final long serialVersionUID = 1; - private List> committables; + // key: checkpointId value: Paimon commit message List + private Map> committablesMap; } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java index 5c3f68f3365..8009135346c 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java @@ -36,10 +36,11 @@ import lombok.extern.slf4j.Slf4j; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; -import java.util.Objects; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; /** Paimon connector aggregated committer class */ @@ -70,28 +71,39 @@ public PaimonAggregatedCommitter( public List commit( List aggregatedCommitInfo) throws IOException { try (TableCommit tableCommit = tableWriteBuilder.newCommit()) { - List fileCommittables = - aggregatedCommitInfo.stream() - .map(PaimonAggregatedCommitInfo::getCommittables) - .flatMap(List::stream) - .flatMap(List::stream) - .collect(Collectors.toList()); PaimonSecurityContext.runSecured( () -> { if (JobContextUtil.isBatchJob(jobContext)) { log.debug("Trying to commit states batch mode"); + List fileCommittables = + aggregatedCommitInfo.stream() + .flatMap( + info -> + info.getCommittablesMap().values() + .stream()) + .flatMap(List::stream) + .collect(Collectors.toList()); ((BatchTableCommit) tableCommit).commit(fileCommittables); } else { log.debug("Trying to commit states streaming mode"); - ((StreamTableCommit) tableCommit) - .commit(Objects.hash(fileCommittables), fileCommittables); + aggregatedCommitInfo.stream() + .flatMap( + paimonAggregatedCommitInfo -> + paimonAggregatedCommitInfo.getCommittablesMap() + .entrySet().stream()) + .forEach( + entry -> + ((StreamTableCommit) tableCommit) + .commit( + entry.getKey(), + entry.getValue())); } return null; }); } catch (Exception e) { throw new PaimonConnectorException( PaimonConnectorErrorCode.TABLE_WRITE_COMMIT_FAILED, - "Flink table store commit operation failed", + "Paimon table 
write-commit failed.",
+                    e);
+        }
+        return Collections.emptyList();
+    }
@@ -99,8 +111,14 @@ public List<PaimonAggregatedCommitInfo> commit(
     @Override
     public PaimonAggregatedCommitInfo combine(List<PaimonCommitInfo> commitInfos) {
-        List<List<CommitMessage>> committables = new ArrayList<>();
-        commitInfos.forEach(commitInfo -> committables.add(commitInfo.getCommittables()));
+        Map<Long, List<CommitMessage>> committables = new HashMap<>();
+        commitInfos.forEach(
+                commitInfo ->
+                        committables
+                                .computeIfAbsent(
+                                        commitInfo.getCheckpointId(),
+                                        id -> new CopyOnWriteArrayList<>())
+                                .addAll(commitInfo.getCommittables()));
         return new PaimonAggregatedCommitInfo(committables);
     }
diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonCommitInfo.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonCommitInfo.java
index 9927973821c..1d9844103fc 100644
--- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonCommitInfo.java
+++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonCommitInfo.java
@@ -32,4 +32,6 @@ public class PaimonCommitInfo implements Serializable {
     private static final long serialVersionUID = 1L;
 
     List<CommitMessage> committables;
+
+    Long checkpointId;
 }
diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/source/converter/SqlToPaimonPredicateConverter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/source/converter/SqlToPaimonPredicateConverter.java
index 212bfd6e8b8..0bf47b13105 100644
--- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/source/converter/SqlToPaimonPredicateConverter.java
+++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/source/converter/SqlToPaimonPredicateConverter.java
@@ -54,8 +54,6 @@
 import net.sf.jsqlparser.statement.select.AllColumns;
 import net.sf.jsqlparser.statement.select.PlainSelect;
 import net.sf.jsqlparser.statement.select.Select;
-import net.sf.jsqlparser.statement.select.SelectBody;
-import net.sf.jsqlparser.statement.select.SelectExpressionItem;
 import net.sf.jsqlparser.statement.select.SelectItem;
 
 import java.math.BigDecimal;
@@ -83,7 +81,7 @@ public static PlainSelect convertToPlainSelect(String query) {
             throw new IllegalArgumentException("Only SELECT statements are supported.");
         }
         Select select = (Select) statement;
-        SelectBody selectBody = select.getSelectBody();
+        Select selectBody = select.getSelectBody();
         if (!(selectBody instanceof PlainSelect)) {
             throw new IllegalArgumentException("Only simple SELECT statements are supported.");
         }
@@ -101,18 +99,15 @@ public static int[] convertSqlSelectToPaimonProjectionIndex(
             String[] fieldNames, PlainSelect plainSelect) {
         int[] projectionIndex = null;
-        List<SelectItem> selectItems = plainSelect.getSelectItems();
+        List<SelectItem<?>> selectItems = plainSelect.getSelectItems();
         List<String> columnNames = new ArrayList<>();
         for (SelectItem selectItem : selectItems) {
-            if (selectItem instanceof AllColumns) {
+            if (selectItem.getExpression() instanceof AllColumns) {
                 return null;
-            } else if (selectItem instanceof SelectExpressionItem) {
-                SelectExpressionItem selectExpressionItem = (SelectExpressionItem) selectItem;
-                String columnName =
selectExpressionItem.getExpression().toString(); - columnNames.add(columnName); } else { - throw new IllegalArgumentException("Error encountered parsing query fields."); + String columnName = ((Column) selectItem.getExpression()).getColumnName(); + columnNames.add(columnName); } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java index fa8ed338208..ca825a269f9 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java @@ -25,6 +25,7 @@ import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; +import org.apache.paimon.CoreOptions; import org.apache.paimon.schema.Schema; import org.apache.paimon.types.DataField; import org.apache.paimon.types.DataType; @@ -61,6 +62,10 @@ public static Schema toPaimonSchema( paiSchemaBuilder.partitionKeys(partitionKeys); } Map writeProps = paimonSinkConfig.getWriteProps(); + CoreOptions.ChangelogProducer changelogProducer = paimonSinkConfig.getChangelogProducer(); + if (changelogProducer != null) { + writeProps.remove(PaimonSinkConfig.CHANGELOG_TMP_PATH); + } if (!writeProps.isEmpty()) { paiSchemaBuilder.options(writeProps); } diff --git a/seatunnel-connectors-v2/connector-prometheus/pom.xml b/seatunnel-connectors-v2/connector-prometheus/pom.xml new file mode 100644 index 00000000000..f9591dedf1b --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/pom.xml @@ -0,0 +1,118 @@ + + + + 4.0.0 + + org.apache.seatunnel + seatunnel-connectors-v2 + ${revision} + + + connector-prometheus + SeaTunnel : Connectors V2 : Prometheus + + + 0.16.0 + 3.23.2 + 1.1.7.3 + 3.25.4 + + + + org.apache.seatunnel + connector-common + ${project.version} + + + org.apache.seatunnel + connector-http-base + ${project.version} + + + io.prometheus + simpleclient + ${prometheus-client.version} + + + io.prometheus + simpleclient_httpserver + ${prometheus-client.version} + + + + com.google.protobuf + protobuf-java + ${protobuf-java.version} + + + com.google.protobuf + protobuf-java-util + ${protobuf-java.version} + + + + + org.xerial.snappy + snappy-java + ${snappy-java.version} + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + + shade + + package + + false + true + false + false + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + com.google.protobuf + ${seatunnel.shade.package}.com.google.protobuf + + + + + + + + + diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/Exception/PrometheusConnectorException.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/Exception/PrometheusConnectorException.java new file mode 100644 index 00000000000..d351f2ba15e --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/Exception/PrometheusConnectorException.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.prometheus.Exception; + +import org.apache.seatunnel.common.exception.SeaTunnelErrorCode; +import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException; + +public class PrometheusConnectorException extends SeaTunnelRuntimeException { + + public PrometheusConnectorException( + SeaTunnelErrorCode seaTunnelErrorCode, String errorMessage) { + super(seaTunnelErrorCode, errorMessage); + } + + public PrometheusConnectorException( + SeaTunnelErrorCode seaTunnelErrorCode, String errorMessage, Throwable cause) { + super(seaTunnelErrorCode, errorMessage, cause); + } + + public PrometheusConnectorException(SeaTunnelErrorCode seaTunnelErrorCode, Throwable cause) { + super(seaTunnelErrorCode, cause); + } +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSinkConfig.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSinkConfig.java new file mode 100644 index 00000000000..959cff607ce --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSinkConfig.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.seatunnel.connectors.seatunnel.prometheus.config; + +import org.apache.seatunnel.api.configuration.Option; +import org.apache.seatunnel.api.configuration.Options; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.connectors.seatunnel.http.config.HttpConfig; + +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +import static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkArgument; + +@Setter +@Getter +@ToString +public class PrometheusSinkConfig extends HttpConfig { + + private static final int DEFAULT_BATCH_SIZE = 1024; + + private static final Long DEFAULT_FLUSH_INTERVAL = 300000L; + + public static final Option KEY_TIMESTAMP = + Options.key("key_timestamp") + .stringType() + .noDefaultValue() + .withDescription("key timestamp"); + + public static final Option KEY_LABEL = + Options.key("key_label").stringType().noDefaultValue().withDescription("key label"); + + public static final Option KEY_VALUE = + Options.key("key_value").stringType().noDefaultValue().withDescription("key value"); + + public static final Option BATCH_SIZE = + Options.key("batch_size") + .intType() + .defaultValue(DEFAULT_BATCH_SIZE) + .withDescription("the batch size writer to prometheus"); + + public static final Option FLUSH_INTERVAL = + Options.key("flush_interval") + .longType() + .defaultValue(DEFAULT_FLUSH_INTERVAL) + .withDescription("the flush interval writer to prometheus"); + + private String keyTimestamp; + + private String keyValue; + + private String keyLabel; + + private int batchSize = BATCH_SIZE.defaultValue(); + + private long flushInterval = FLUSH_INTERVAL.defaultValue(); + + public static PrometheusSinkConfig loadConfig(ReadonlyConfig pluginConfig) { + PrometheusSinkConfig sinkConfig = new PrometheusSinkConfig(); + if (pluginConfig.getOptional(KEY_VALUE).isPresent()) { + sinkConfig.setKeyValue(pluginConfig.get(KEY_VALUE)); + } + if (pluginConfig.getOptional(KEY_LABEL).isPresent()) { + sinkConfig.setKeyLabel(pluginConfig.get(KEY_LABEL)); + } + if (pluginConfig.getOptional(KEY_TIMESTAMP).isPresent()) { + sinkConfig.setKeyTimestamp(pluginConfig.get(KEY_TIMESTAMP)); + } + if (pluginConfig.getOptional(BATCH_SIZE).isPresent()) { + int batchSize = checkIntArgument(pluginConfig.get(BATCH_SIZE)); + sinkConfig.setBatchSize(batchSize); + } + if (pluginConfig.getOptional(FLUSH_INTERVAL).isPresent()) { + long flushInterval = pluginConfig.get(FLUSH_INTERVAL); + sinkConfig.setFlushInterval(flushInterval); + } + return sinkConfig; + } + + private static int checkIntArgument(int args) { + checkArgument(args > 0); + return args; + } +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSourceConfig.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSourceConfig.java new file mode 100644 index 00000000000..1c51f6b8f68 --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSourceConfig.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.prometheus.config; + +import org.apache.seatunnel.api.configuration.Option; +import org.apache.seatunnel.api.configuration.Options; +import org.apache.seatunnel.connectors.seatunnel.http.config.HttpConfig; + +/** + * SourceConfig is the configuration for the PrometheusSource. + * + *
<p>
please see the following link for more details: + * https://prometheus.io/docs/prometheus/latest/querying/api/ + */ +public class PrometheusSourceConfig extends HttpConfig { + + public static final String INSTANT_QUERY_URL = "/api/v1/query"; + + public static final String RANGE_QUERY = "Range"; + + public static final String INSTANT_QUERY = "Instant"; + + public static final String RANGE_QUERY_URL = "/api/v1/query_range"; + + public static final Option QUERY = + Options.key("query") + .stringType() + .noDefaultValue() + .withDescription("Prometheus expression query string"); + + public static final Option QUERY_TYPE = + Options.key("query_type") + .stringType() + .defaultValue("Instant") + .withDescription("Prometheus expression query string"); + + public static final Option TIMEOUT = + Options.key("timeout") + .longType() + .noDefaultValue() + .withDescription("Evaluation timeout"); + + public static class RangeConfig { + + public static final Option START = + Options.key("start") + .stringType() + .noDefaultValue() + .withDescription("Start timestamp, inclusive."); + + public static final Option END = + Options.key("end") + .stringType() + .noDefaultValue() + .withDescription("End timestamp, inclusive."); + + public static final Option STEP = + Options.key("step") + .stringType() + .noDefaultValue() + .withDescription( + " Query resolution step width in duration format or float number of seconds."); + } + + public static class InstantQueryConfig { + public static final Option TIME = + Options.key("time") + .longType() + .noDefaultValue() + .withDescription("Evaluation timestamp,unix_timestamp"); + } +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSourceParameter.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSourceParameter.java new file mode 100644 index 00000000000..bec3eb2ea1d --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/config/PrometheusSourceParameter.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.prometheus.config; + +import org.apache.seatunnel.shade.com.typesafe.config.Config; + +import org.apache.seatunnel.common.exception.CommonErrorCode; +import org.apache.seatunnel.connectors.seatunnel.http.config.HttpParameter; +import org.apache.seatunnel.connectors.seatunnel.http.config.HttpRequestMethod; +import org.apache.seatunnel.connectors.seatunnel.prometheus.Exception.PrometheusConnectorException; + +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.util.HashMap; + +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.INSTANT_QUERY_URL; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.InstantQueryConfig.TIME; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.QUERY; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.QUERY_TYPE; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.RANGE_QUERY; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.RANGE_QUERY_URL; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.RangeConfig.END; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.RangeConfig.START; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.RangeConfig.STEP; +import static org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSourceConfig.TIMEOUT; + +public class PrometheusSourceParameter extends HttpParameter { + public static final String CURRENT_TIMESTAMP = "CURRENT_TIMESTAMP"; + + public void buildWithConfig(Config pluginConfig) { + super.buildWithConfig(pluginConfig); + + String query = pluginConfig.getString(QUERY.key()); + + String queryType = + pluginConfig.hasPath(QUERY_TYPE.key()) + ? pluginConfig.getString(QUERY_TYPE.key()) + : QUERY_TYPE.defaultValue(); + + this.params = this.getParams() == null ? 
new HashMap<>() : this.getParams(); + + params.put(PrometheusSourceConfig.QUERY.key(), query); + + this.setMethod(HttpRequestMethod.GET); + + if (pluginConfig.hasPath(TIMEOUT.key())) { + params.put(TIMEOUT.key(), pluginConfig.getString(TIMEOUT.key())); + } + + if (RANGE_QUERY.equals(queryType)) { + this.setUrl(this.getUrl() + RANGE_QUERY_URL); + params.put(START.key(), checkTimeParam(pluginConfig.getString(START.key()))); + params.put(END.key(), checkTimeParam(pluginConfig.getString(END.key()))); + params.put(STEP.key(), pluginConfig.getString(STEP.key())); + + } else { + this.setUrl(this.getUrl() + INSTANT_QUERY_URL); + if (pluginConfig.hasPath(TIME.key())) { + String time = pluginConfig.getString(TIME.key()); + params.put(TIME.key(), time); + } + } + this.setParams(params); + } + + private String checkTimeParam(String time) { + if (CURRENT_TIMESTAMP.equals(time)) { + ZonedDateTime now = ZonedDateTime.now(); + return now.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); + } + if (isValidISO8601(time)) { + return time; + } + throw new PrometheusConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, "unsupported time type"); + } + + private boolean isValidISO8601(String dateTimeString) { + try { + DateTimeFormatter formatter = DateTimeFormatter.ISO_INSTANT; + ZonedDateTime.parse(dateTimeString, formatter); + return true; + } catch (DateTimeParseException e) { + return false; + } + } +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/pojo/InstantPoint.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/pojo/InstantPoint.java new file mode 100644 index 00000000000..7e12562b355 --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/pojo/InstantPoint.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.seatunnel.connectors.seatunnel.prometheus.pojo; + +import lombok.Data; + +import java.util.List; +import java.util.Map; + +@Data +public class InstantPoint { + private Map metric; + + private List value; +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/pojo/RangePoint.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/pojo/RangePoint.java new file mode 100644 index 00000000000..a597fd0c98e --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/pojo/RangePoint.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.seatunnel.connectors.seatunnel.prometheus.pojo; + +import lombok.Data; + +import java.util.List; +import java.util.Map; + +@Data +public class RangePoint { + + private Map metric; + + private List values; +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/serialize/PrometheusSerializer.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/serialize/PrometheusSerializer.java new file mode 100644 index 00000000000..7ce0dfc8dd1 --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/serialize/PrometheusSerializer.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.seatunnel.connectors.seatunnel.prometheus.serialize;
+
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.Exception.PrometheusConnectorException;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.sink.Point;
+
+import com.google.common.base.Strings;
+import lombok.NonNull;
+import lombok.extern.slf4j.Slf4j;
+
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Function;
+
+@Slf4j
+public class PrometheusSerializer implements Serializer {
+
+    private final Function<SeaTunnelRow, Long> timestampExtractor;
+    private final Function<SeaTunnelRow, Double> valueExtractor;
+    private final Function<SeaTunnelRow, Map<String, String>> labelExtractor;
+
+    public PrometheusSerializer(
+            @NonNull SeaTunnelRowType seaTunnelRowType,
+            String timestampKey,
+            String labelKey,
+            String valueKey) {
+        this.valueExtractor = createValueExtractor(seaTunnelRowType, valueKey);
+        this.timestampExtractor = createTimestampExtractor(seaTunnelRowType, timestampKey);
+        this.labelExtractor = createLabelExtractor(seaTunnelRowType, labelKey);
+    }
+
+    @Override
+    public Point serialize(SeaTunnelRow seaTunnelRow) {
+        Long timestamp = timestampExtractor.apply(seaTunnelRow);
+        Double value = valueExtractor.apply(seaTunnelRow);
+        Map<String, String> label = labelExtractor.apply(seaTunnelRow);
+        Point point = Point.builder().metric(label).value(value).timestamp(timestamp).build();
+
+        return point;
+    }
+
+    private Function<SeaTunnelRow, Map<String, String>> createLabelExtractor(
+            SeaTunnelRowType seaTunnelRowType, String labelKey) {
+        if (Strings.isNullOrEmpty(labelKey)) {
+            return row -> new HashMap<>();
+        }
+        int labelFieldIndex = seaTunnelRowType.indexOf(labelKey);
+        return row -> {
+            Object value = row.getField(labelFieldIndex);
+            if (value == null) {
+                return new HashMap<>();
+            }
+            SeaTunnelDataType<?> valueFieldType = seaTunnelRowType.getFieldType(labelFieldIndex);
+            switch (valueFieldType.getSqlType()) {
+                case MAP:
+                    return (Map<String, String>) value;
+                default:
+                    throw new PrometheusConnectorException(
+                            CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE,
+                            "Unsupported data type: " + valueFieldType);
+            }
+        };
+    }
+
+    private Function<SeaTunnelRow, Double> createValueExtractor(
+            SeaTunnelRowType seaTunnelRowType, String valueKey) {
+        if (Strings.isNullOrEmpty(valueKey)) {
+            return row -> Double.NaN;
+        }
+
+        int valueFieldIndex = seaTunnelRowType.indexOf(valueKey);
+        return row -> {
+            Object value = row.getField(valueFieldIndex);
+            if (value == null) {
+                return Double.NaN;
+            }
+            SeaTunnelDataType<?> valueFieldType = seaTunnelRowType.getFieldType(valueFieldIndex);
+            switch (valueFieldType.getSqlType()) {
+                case STRING:
+                    return Double.parseDouble((String) value);
+                case INT:
+                case FLOAT:
+                case DOUBLE:
+                    // numeric fields arrive boxed, so widen via Number instead of casting to String
+                    return ((Number) value).doubleValue();
+                default:
+                    throw new PrometheusConnectorException(
+                            CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE,
+                            "Unsupported data type: " + valueFieldType);
+            }
+        };
+    }
+
+    private Function<SeaTunnelRow, Long> createTimestampExtractor(
+            SeaTunnelRowType seaTunnelRowType, String timestampKey) {
+        if (Strings.isNullOrEmpty(timestampKey)) {
+            return row -> System.currentTimeMillis();
+        }
+
+        int timestampFieldIndex = seaTunnelRowType.indexOf(timestampKey);
+        return row -> {
+            Object timestamp = row.getField(timestampFieldIndex);
+            if (timestamp == null) {
+                return System.currentTimeMillis();
+            }
+            SeaTunnelDataType<?>
timestampFieldType = + seaTunnelRowType.getFieldType(timestampFieldIndex); + switch (timestampFieldType.getSqlType()) { + case STRING: + return Long.parseLong((String) timestamp); + case TIMESTAMP: + return ((LocalDateTime) timestamp) + .atZone(ZoneId.systemDefault()) + .toInstant() + .toEpochMilli(); + case BIGINT: + return (Long) timestamp; + case DOUBLE: + double timestampDouble = (double) timestamp; + return (long) (timestampDouble * 1000); + default: + throw new PrometheusConnectorException( + CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE, + "Unsupported data type: " + timestampFieldType); + } + }; + } +} diff --git a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/batch/MilvusBatchWriter.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/serialize/Serializer.java similarity index 79% rename from seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/batch/MilvusBatchWriter.java rename to seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/serialize/Serializer.java index 91e04342dc6..ad830f5baa0 100644 --- a/seatunnel-connectors-v2/connector-milvus/src/main/java/org/apache/seatunnel/connectors/seatunnel/milvus/sink/batch/MilvusBatchWriter.java +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/serialize/Serializer.java @@ -15,17 +15,11 @@ * limitations under the License. */ -package org.apache.seatunnel.connectors.seatunnel.milvus.sink.batch; +package org.apache.seatunnel.connectors.seatunnel.prometheus.serialize; import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.connectors.seatunnel.prometheus.sink.Point; -public interface MilvusBatchWriter { - - void addToBatch(SeaTunnelRow element); - - boolean needFlush(); - - boolean flush(); - - void close(); +public interface Serializer { + Point serialize(SeaTunnelRow seaTunnelRow); } diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/Point.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/Point.java new file mode 100644 index 00000000000..fb78bff56fa --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/Point.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.seatunnel.connectors.seatunnel.prometheus.sink;
+
+import lombok.Builder;
+import lombok.Data;
+
+import java.util.Map;
+
+@Data
+@Builder
+public class Point {
+
+    private Map<String, String> metric;
+
+    private Double value;
+
+    private Long timestamp;
+}
diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSink.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSink.java
new file mode 100644
index 00000000000..35ec257fc93
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSink.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seatunnel.connectors.seatunnel.prometheus.sink;
+
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
+import org.apache.seatunnel.api.sink.SinkWriter;
+import org.apache.seatunnel.api.sink.SupportMultiTableSink;
+import org.apache.seatunnel.api.table.catalog.CatalogTable;
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.connectors.seatunnel.common.sink.AbstractSimpleSink;
+import org.apache.seatunnel.connectors.seatunnel.http.config.HttpConfig;
+import org.apache.seatunnel.connectors.seatunnel.http.config.HttpParameter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+public class PrometheusSink extends AbstractSimpleSink<SeaTunnelRow, Void>
+        implements SupportMultiTableSink {
+
+    protected final HttpParameter httpParameter = new HttpParameter();
+    protected CatalogTable catalogTable;
+    protected ReadonlyConfig pluginConfig;
+
+    public PrometheusSink(ReadonlyConfig pluginConfig, CatalogTable catalogTable) {
+        this.pluginConfig = pluginConfig;
+        httpParameter.setUrl(pluginConfig.get(HttpConfig.URL));
+        if (pluginConfig.getOptional(HttpConfig.HEADERS).isPresent()) {
+            httpParameter.setHeaders(pluginConfig.get(HttpConfig.HEADERS));
+        }
+        if (pluginConfig.getOptional(HttpConfig.PARAMS).isPresent()) {
+            // store request params as params rather than overwriting the headers
+            httpParameter.setParams(pluginConfig.get(HttpConfig.PARAMS));
+        }
+        this.catalogTable = catalogTable;
+
+        if (Objects.isNull(httpParameter.getHeaders())) {
+            Map<String, String> headers = new HashMap<>();
+            headers.put("Content-type", "application/x-protobuf");
+            headers.put("Content-Encoding", "snappy");
+            headers.put("X-Prometheus-Remote-Write-Version", "0.1.0");
+            httpParameter.setHeaders(headers);
+        } else {
+            httpParameter.getHeaders().put("Content-type", "application/x-protobuf");
+            httpParameter.getHeaders().put("Content-Encoding", "snappy");
+            httpParameter.getHeaders().put("X-Prometheus-Remote-Write-Version", "0.1.0");
+        }
+    }
+
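+    // A minimal sketch of a job config for this sink (values are hypothetical; only the
+    // option keys come from PrometheusSinkConfig):
+    //   sink {
+    //     Prometheus {
+    //       url = "http://localhost:9090/api/v1/write"
+    //       key_label = "metric"
+    //       key_value = "value"
+    //       key_timestamp = "ts"
+    //     }
+    //   }
+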
diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSink.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSink.java
new file mode 100644
index 00000000000..35ec257fc93
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSink.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seatunnel.connectors.seatunnel.prometheus.sink;
+
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
+import org.apache.seatunnel.api.sink.SinkWriter;
+import org.apache.seatunnel.api.sink.SupportMultiTableSink;
+import org.apache.seatunnel.api.table.catalog.CatalogTable;
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.connectors.seatunnel.common.sink.AbstractSimpleSink;
+import org.apache.seatunnel.connectors.seatunnel.http.config.HttpConfig;
+import org.apache.seatunnel.connectors.seatunnel.http.config.HttpParameter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+public class PrometheusSink extends AbstractSimpleSink<SeaTunnelRow, Void>
+        implements SupportMultiTableSink {
+
+    protected final HttpParameter httpParameter = new HttpParameter();
+    protected CatalogTable catalogTable;
+    protected ReadonlyConfig pluginConfig;
+
+    public PrometheusSink(ReadonlyConfig pluginConfig, CatalogTable catalogTable) {
+        this.pluginConfig = pluginConfig;
+        httpParameter.setUrl(pluginConfig.get(HttpConfig.URL));
+        if (pluginConfig.getOptional(HttpConfig.HEADERS).isPresent()) {
+            httpParameter.setHeaders(pluginConfig.get(HttpConfig.HEADERS));
+        }
+        if (pluginConfig.getOptional(HttpConfig.PARAMS).isPresent()) {
+            httpParameter.setParams(pluginConfig.get(HttpConfig.PARAMS));
+        }
+        this.catalogTable = catalogTable;
+
+        if (Objects.isNull(httpParameter.getHeaders())) {
+            Map<String, String> headers = new HashMap<>();
+            headers.put("Content-type", "application/x-protobuf");
+            headers.put("Content-Encoding", "snappy");
+            headers.put("X-Prometheus-Remote-Write-Version", "0.1.0");
+            httpParameter.setHeaders(headers);
+        } else {
+            httpParameter.getHeaders().put("Content-type", "application/x-protobuf");
+            httpParameter.getHeaders().put("Content-Encoding", "snappy");
+            httpParameter.getHeaders().put("X-Prometheus-Remote-Write-Version", "0.1.0");
+        }
+    }
+
+    @Override
+    public String getPluginName() {
+        return "Prometheus";
+    }
+
+    @Override
+    public PrometheusWriter createWriter(SinkWriter.Context context) {
+        return new PrometheusWriter(
+                catalogTable.getSeaTunnelRowType(), httpParameter, pluginConfig);
+    }
+
+    @Override
+    public Optional<CatalogTable> getWriteCatalogTable() {
+        return Optional.ofNullable(catalogTable);
+    }
+}
diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSinkFactory.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSinkFactory.java
new file mode 100644
index 00000000000..544f17c9a6f
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusSinkFactory.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seatunnel.connectors.seatunnel.prometheus.sink;
+
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
+import org.apache.seatunnel.api.configuration.util.OptionRule;
+import org.apache.seatunnel.api.sink.SinkCommonOptions;
+import org.apache.seatunnel.api.table.catalog.CatalogTable;
+import org.apache.seatunnel.api.table.connector.TableSink;
+import org.apache.seatunnel.api.table.factory.Factory;
+import org.apache.seatunnel.api.table.factory.TableSinkFactoryContext;
+import org.apache.seatunnel.connectors.seatunnel.http.sink.HttpSinkFactory;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSinkConfig;
+
+import com.google.auto.service.AutoService;
+
+@AutoService(Factory.class)
+public class PrometheusSinkFactory extends HttpSinkFactory {
+    @Override
+    public String factoryIdentifier() {
+        return "Prometheus";
+    }
+
+    @Override
+    public TableSink createSink(TableSinkFactoryContext context) {
+        ReadonlyConfig readonlyConfig = context.getOptions();
+        CatalogTable catalogTable = context.getCatalogTable();
+        return () -> new PrometheusSink(readonlyConfig, catalogTable);
+    }
+
+    @Override
+    public OptionRule optionRule() {
+        return OptionRule.builder()
+                .required(PrometheusSinkConfig.URL)
+                .required(PrometheusSinkConfig.KEY_LABEL)
+                .required(PrometheusSinkConfig.KEY_VALUE)
+                .optional(PrometheusSinkConfig.KEY_TIMESTAMP)
+                .optional(PrometheusSinkConfig.HEADERS)
+                .optional(PrometheusSinkConfig.RETRY)
+                .optional(PrometheusSinkConfig.RETRY_BACKOFF_MULTIPLIER_MS)
+                .optional(PrometheusSinkConfig.RETRY_BACKOFF_MAX_MS)
+                .optional(PrometheusSinkConfig.BATCH_SIZE)
+                .optional(PrometheusSinkConfig.FLUSH_INTERVAL)
+                .optional(SinkCommonOptions.MULTI_TABLE_SINK_REPLICA)
+                .build();
+    }
+}
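The writer that follows turns buffered Points into the Prometheus remote-write wire format: a protobuf WriteRequest, snappy-compressed, POSTed with the headers set in the sink constructor above. A standalone sketch of that payload assembly, using the generated Remote and Types classes added later in this diff; the label names and sample value are illustrative only:

    import org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto.Remote;
    import org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto.Types;

    import org.xerial.snappy.Snappy;

    import java.io.IOException;

    public class RemoteWriteExample {
        public static void main(String[] args) throws IOException {
            // One series ({__name__="up", job="seatunnel"}) carrying one sample.
            Types.TimeSeries series =
                    Types.TimeSeries.newBuilder()
                            .addLabels(Types.Label.newBuilder().setName("__name__").setValue("up"))
                            .addLabels(Types.Label.newBuilder().setName("job").setValue("seatunnel"))
                            .addSamples(
                                    Types.Sample.newBuilder()
                                            .setValue(1.0)
                                            .setTimestamp(System.currentTimeMillis()))
                            .build();

            Remote.WriteRequest request =
                    Remote.WriteRequest.newBuilder().addTimeseries(series).build();

            // Remote-write bodies are snappy-compressed protobuf, which is why the sink
            // sets Content-type: application/x-protobuf and Content-Encoding: snappy.
            byte[] body = Snappy.compress(request.toByteArray());
            System.out.println("payload bytes: " + body.length);
        }
    }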
diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusWriter.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusWriter.java
new file mode 100644
index 00000000000..307abb8eed6
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/PrometheusWriter.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seatunnel.connectors.seatunnel.prometheus.sink;
+
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated;
+import org.apache.seatunnel.connectors.seatunnel.http.client.HttpClientProvider;
+import org.apache.seatunnel.connectors.seatunnel.http.client.HttpResponse;
+import org.apache.seatunnel.connectors.seatunnel.http.config.HttpParameter;
+import org.apache.seatunnel.connectors.seatunnel.http.sink.HttpSinkWriter;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.Exception.PrometheusConnectorException;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.config.PrometheusSinkConfig;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.serialize.PrometheusSerializer;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.serialize.Serializer;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto.Remote;
+import org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto.Types;
+
+import org.apache.http.HttpStatus;
+import org.apache.http.entity.ByteArrayEntity;
+
+import org.xerial.snappy.Snappy;
+
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+@Slf4j
+public class PrometheusWriter extends HttpSinkWriter {
+    private final List<Point> batchList;
+    private volatile Exception flushException;
+    private final Integer batchSize;
+    private final long flushInterval;
+    private PrometheusSinkConfig sinkConfig;
+    private final Serializer serializer;
+    protected final HttpClientProvider httpClient;
+    private ScheduledExecutorService executor;
+    private ScheduledFuture<?> scheduledFuture;
+
+    public PrometheusWriter(
+            SeaTunnelRowType seaTunnelRowType,
+            HttpParameter httpParameter,
+            ReadonlyConfig pluginConfig) {
+
+        super(seaTunnelRowType, httpParameter);
+        this.batchList = new ArrayList<>();
+        this.sinkConfig = PrometheusSinkConfig.loadConfig(pluginConfig);
+        this.batchSize = sinkConfig.getBatchSize();
+        this.flushInterval = sinkConfig.getFlushInterval();
+        this.serializer =
+                new PrometheusSerializer(
+                        seaTunnelRowType,
+                        sinkConfig.getKeyTimestamp(),
+                        sinkConfig.getKeyLabel(),
+                        sinkConfig.getKeyValue());
+        this.httpClient = new HttpClientProvider(httpParameter);
+        if (flushInterval > 0) {
+            log.info("Start scheduled flush of buffered points, interval: {} ms", flushInterval);
+            this.executor =
+                    Executors.newScheduledThreadPool(
+                            1,
+                            runnable -> {
+                                Thread thread = new Thread(runnable);
+                                thread.setDaemon(true);
+                                thread.setName("Prometheus-Metric-Sender");
+                                return thread;
+                            });
+            this.scheduledFuture =
+                    executor.scheduleAtFixedRate(
+                            this::flushSchedule,
+                            flushInterval,
+                            flushInterval,
+                            TimeUnit.MILLISECONDS);
+        }
+    }
+
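The constructor above installs a single-threaded daemon scheduler so buffered points are flushed by time as well as by batch size. A minimal sketch of that same pattern in isolation, with an illustrative interval; the daemon flag keeps a stuck flush thread from holding the JVM open after the job finishes:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class PeriodicFlushExample {
        public static void main(String[] args) throws InterruptedException {
            // Same thread-factory shape as the writer: one named daemon thread.
            ScheduledExecutorService executor =
                    Executors.newScheduledThreadPool(
                            1,
                            runnable -> {
                                Thread thread = new Thread(runnable);
                                thread.setDaemon(true);
                                thread.setName("Prometheus-Metric-Sender");
                                return thread;
                            });
            long flushIntervalMs = 1000; // illustrative value
            executor.scheduleAtFixedRate(
                    () -> System.out.println("flush tick"),
                    flushIntervalMs,
                    flushIntervalMs,
                    TimeUnit.MILLISECONDS);
            Thread.sleep(3500); // let a few ticks fire
            executor.shutdownNow();
        }
    }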
+    @Override
+    public void write(SeaTunnelRow element) {
+        Point record = serializer.serialize(element);
+        this.write(record);
+    }
+
+    public void write(Point record) {
+        checkFlushException();
+
+        synchronized (batchList) {
+            batchList.add(record);
+            if (batchSize > 0 && batchList.size() >= batchSize) {
+                flush();
+            }
+        }
+    }
+
+    private void flushSchedule() {
+        synchronized (batchList) {
+            if (!batchList.isEmpty()) {
+                flush();
+            }
+        }
+    }
+
+    private void checkFlushException() {
+        if (flushException != null) {
+            throw new PrometheusConnectorException(
+                    CommonErrorCodeDeprecated.FLUSH_DATA_FAILED,
+                    "Writing records to prometheus failed.",
+                    flushException);
+        }
+    }
+
+    private void flush() {
+        checkFlushException();
+        if (batchList.isEmpty()) {
+            return;
+        }
+        try {
+            byte[] body = snappy(batchList);
+            ByteArrayEntity byteArrayEntity = new ByteArrayEntity(body);
+            HttpResponse response =
+                    httpClient.doPost(
+                            httpParameter.getUrl(), httpParameter.getHeaders(), byteArrayEntity);
+            if (HttpStatus.SC_NO_CONTENT == response.getCode()) {
+                return;
+            }
+            log.error(
+                    "http client execute exception, http response status code:[{}], content:[{}]",
+                    response.getCode(),
+                    response.getContent());
+        } catch (Exception e) {
+            log.error(e.getMessage(), e);
+        } finally {
+            batchList.clear();
+        }
+    }
+
+    /**
+     * Snappy-compress a batch of series data into a remote-write request body.
+     *
+     * @param points list of series data
+     * @return compressed byte data
+     * @throws IOException IOException
+     */
+    private byte[] snappy(List<Point> points) throws IOException {
+        Remote.WriteRequest writeRequest = createRemoteWriteRequest(points);
+        byte[] serializedData = writeRequest.toByteArray();
+        byte[] compressedData = Snappy.compress(serializedData);
+        return compressedData;
+    }
+
+    /**
+     * Create a remote-write request from a batch of series data.
+     *
+     * @param points list of series data
+     * @return Remote.WriteRequest
+     */
+    private Remote.WriteRequest createRemoteWriteRequest(List<Point> points) {
+        Remote.WriteRequest.Builder writeRequestBuilder = Remote.WriteRequest.newBuilder();
+        for (Point point : points) {
+            List<Types.Label> labels = new ArrayList<>();
+            Types.TimeSeries.Builder timeSeriesBuilder = Types.TimeSeries.newBuilder();
+            for (Map.Entry<String, String> entry : point.getMetric().entrySet()) {
+                Types.Label label =
+                        Types.Label.newBuilder()
+                                .setName(entry.getKey())
+                                .setValue(entry.getValue())
+                                .build();
+                labels.add(label);
+            }
+            Types.Sample sample =
+                    Types.Sample.newBuilder()
+                            .setTimestamp(point.getTimestamp())
+                            .setValue(point.getValue())
+                            .build();
+            timeSeriesBuilder.addAllLabels(labels);
+            timeSeriesBuilder.addSamples(sample);
+            writeRequestBuilder.addTimeseries(timeSeriesBuilder);
+        }
+        return writeRequestBuilder.build();
+    }
+
+    @Override
+    public void 
close() throws IOException { + super.close(); + if (scheduledFuture != null) { + scheduledFuture.cancel(false); + if (executor != null) { + executor.shutdownNow(); + } + } + this.flush(); + } +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/GoGoProtos.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/GoGoProtos.java new file mode 100644 index 00000000000..2ebbc9d97f1 --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/GoGoProtos.java @@ -0,0 +1,919 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto; + +public final class GoGoProtos { + private GoGoProtos() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) { + registry.add(GoGoProtos.goprotoEnumPrefix); + registry.add(GoGoProtos.goprotoEnumStringer); + registry.add(GoGoProtos.enumStringer); + registry.add(GoGoProtos.enumCustomname); + registry.add(GoGoProtos.enumdecl); + registry.add(GoGoProtos.enumvalueCustomname); + registry.add(GoGoProtos.goprotoGettersAll); + registry.add(GoGoProtos.goprotoEnumPrefixAll); + registry.add(GoGoProtos.goprotoStringerAll); + registry.add(GoGoProtos.verboseEqualAll); + registry.add(GoGoProtos.faceAll); + registry.add(GoGoProtos.gostringAll); + registry.add(GoGoProtos.populateAll); + registry.add(GoGoProtos.stringerAll); + registry.add(GoGoProtos.onlyoneAll); + registry.add(GoGoProtos.equalAll); + registry.add(GoGoProtos.descriptionAll); + registry.add(GoGoProtos.testgenAll); + registry.add(GoGoProtos.benchgenAll); + registry.add(GoGoProtos.marshalerAll); + registry.add(GoGoProtos.unmarshalerAll); + registry.add(GoGoProtos.stableMarshalerAll); + registry.add(GoGoProtos.sizerAll); + registry.add(GoGoProtos.goprotoEnumStringerAll); + registry.add(GoGoProtos.enumStringerAll); + registry.add(GoGoProtos.unsafeMarshalerAll); + registry.add(GoGoProtos.unsafeUnmarshalerAll); + registry.add(GoGoProtos.goprotoExtensionsMapAll); + registry.add(GoGoProtos.goprotoUnrecognizedAll); + registry.add(GoGoProtos.gogoprotoImport); + registry.add(GoGoProtos.protosizerAll); + registry.add(GoGoProtos.compareAll); + registry.add(GoGoProtos.typedeclAll); + registry.add(GoGoProtos.enumdeclAll); + registry.add(GoGoProtos.goprotoRegistration); + registry.add(GoGoProtos.messagenameAll); + registry.add(GoGoProtos.goprotoSizecacheAll); + registry.add(GoGoProtos.goprotoUnkeyedAll); + registry.add(GoGoProtos.goprotoGetters); + registry.add(GoGoProtos.goprotoStringer); + registry.add(GoGoProtos.verboseEqual); + 
registry.add(GoGoProtos.face); + registry.add(GoGoProtos.gostring); + registry.add(GoGoProtos.populate); + registry.add(GoGoProtos.stringer); + registry.add(GoGoProtos.onlyone); + registry.add(GoGoProtos.equal); + registry.add(GoGoProtos.description); + registry.add(GoGoProtos.testgen); + registry.add(GoGoProtos.benchgen); + registry.add(GoGoProtos.marshaler); + registry.add(GoGoProtos.unmarshaler); + registry.add(GoGoProtos.stableMarshaler); + registry.add(GoGoProtos.sizer); + registry.add(GoGoProtos.unsafeMarshaler); + registry.add(GoGoProtos.unsafeUnmarshaler); + registry.add(GoGoProtos.goprotoExtensionsMap); + registry.add(GoGoProtos.goprotoUnrecognized); + registry.add(GoGoProtos.protosizer); + registry.add(GoGoProtos.compare); + registry.add(GoGoProtos.typedecl); + registry.add(GoGoProtos.messagename); + registry.add(GoGoProtos.goprotoSizecache); + registry.add(GoGoProtos.goprotoUnkeyed); + registry.add(GoGoProtos.nullable); + registry.add(GoGoProtos.embed); + registry.add(GoGoProtos.customtype); + registry.add(GoGoProtos.customname); + registry.add(GoGoProtos.jsontag); + registry.add(GoGoProtos.moretags); + registry.add(GoGoProtos.casttype); + registry.add(GoGoProtos.castkey); + registry.add(GoGoProtos.castvalue); + registry.add(GoGoProtos.stdtime); + registry.add(GoGoProtos.stdduration); + registry.add(GoGoProtos.wktpointer); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public static final int GOPROTO_ENUM_PREFIX_FIELD_NUMBER = 62001; + /** extend .google.protobuf.EnumOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.EnumOptions, Boolean> + goprotoEnumPrefix = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_ENUM_STRINGER_FIELD_NUMBER = 62021; + /** extend .google.protobuf.EnumOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.EnumOptions, Boolean> + goprotoEnumStringer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ENUM_STRINGER_FIELD_NUMBER = 62022; + /** extend .google.protobuf.EnumOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.EnumOptions, Boolean> + enumStringer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ENUM_CUSTOMNAME_FIELD_NUMBER = 62023; + /** extend .google.protobuf.EnumOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.EnumOptions, String> + enumCustomname = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int ENUMDECL_FIELD_NUMBER = 62024; + /** extend .google.protobuf.EnumOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.EnumOptions, Boolean> + enumdecl = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ENUMVALUE_CUSTOMNAME_FIELD_NUMBER = 66001; + /** extend .google.protobuf.EnumValueOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.EnumValueOptions, String> + enumvalueCustomname = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int GOPROTO_GETTERS_ALL_FIELD_NUMBER = 63001; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoGettersAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_ENUM_PREFIX_ALL_FIELD_NUMBER = 63002; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoEnumPrefixAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_STRINGER_ALL_FIELD_NUMBER = 63003; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoStringerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int VERBOSE_EQUAL_ALL_FIELD_NUMBER = 63004; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + verboseEqualAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int FACE_ALL_FIELD_NUMBER = 63005; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + faceAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOSTRING_ALL_FIELD_NUMBER = 63006; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + gostringAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int POPULATE_ALL_FIELD_NUMBER = 63007; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + populateAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int STRINGER_ALL_FIELD_NUMBER = 63008; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + stringerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ONLYONE_ALL_FIELD_NUMBER = 63009; + /** extend .google.protobuf.FileOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + onlyoneAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int EQUAL_ALL_FIELD_NUMBER = 63013; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + equalAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int DESCRIPTION_ALL_FIELD_NUMBER = 63014; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + descriptionAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int TESTGEN_ALL_FIELD_NUMBER = 63015; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + testgenAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int BENCHGEN_ALL_FIELD_NUMBER = 63016; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + benchgenAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int MARSHALER_ALL_FIELD_NUMBER = 63017; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + marshalerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int UNMARSHALER_ALL_FIELD_NUMBER = 63018; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + unmarshalerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int STABLE_MARSHALER_ALL_FIELD_NUMBER = 63019; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + stableMarshalerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int SIZER_ALL_FIELD_NUMBER = 63020; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + sizerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_ENUM_STRINGER_ALL_FIELD_NUMBER = 63021; + /** extend .google.protobuf.FileOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoEnumStringerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ENUM_STRINGER_ALL_FIELD_NUMBER = 63022; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + enumStringerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int UNSAFE_MARSHALER_ALL_FIELD_NUMBER = 63023; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + unsafeMarshalerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int UNSAFE_UNMARSHALER_ALL_FIELD_NUMBER = 63024; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + unsafeUnmarshalerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_EXTENSIONS_MAP_ALL_FIELD_NUMBER = 63025; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoExtensionsMapAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_UNRECOGNIZED_ALL_FIELD_NUMBER = 63026; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoUnrecognizedAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOGOPROTO_IMPORT_FIELD_NUMBER = 63027; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + gogoprotoImport = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int PROTOSIZER_ALL_FIELD_NUMBER = 63028; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + protosizerAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int COMPARE_ALL_FIELD_NUMBER = 63029; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + compareAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int TYPEDECL_ALL_FIELD_NUMBER = 63030; + /** extend .google.protobuf.FileOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + typedeclAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ENUMDECL_ALL_FIELD_NUMBER = 63031; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + enumdeclAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_REGISTRATION_FIELD_NUMBER = 63032; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoRegistration = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int MESSAGENAME_ALL_FIELD_NUMBER = 63033; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + messagenameAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_SIZECACHE_ALL_FIELD_NUMBER = 63034; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoSizecacheAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_UNKEYED_ALL_FIELD_NUMBER = 63035; + /** extend .google.protobuf.FileOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FileOptions, Boolean> + goprotoUnkeyedAll = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_GETTERS_FIELD_NUMBER = 64001; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + goprotoGetters = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_STRINGER_FIELD_NUMBER = 64003; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + goprotoStringer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int VERBOSE_EQUAL_FIELD_NUMBER = 64004; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + verboseEqual = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int FACE_FIELD_NUMBER = 64005; + /** extend .google.protobuf.MessageOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + face = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOSTRING_FIELD_NUMBER = 64006; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + gostring = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int POPULATE_FIELD_NUMBER = 64007; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + populate = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int STRINGER_FIELD_NUMBER = 67008; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + stringer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int ONLYONE_FIELD_NUMBER = 64009; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + onlyone = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int EQUAL_FIELD_NUMBER = 64013; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + equal = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int DESCRIPTION_FIELD_NUMBER = 64014; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + description = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int TESTGEN_FIELD_NUMBER = 64015; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + testgen = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int BENCHGEN_FIELD_NUMBER = 64016; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + benchgen = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int MARSHALER_FIELD_NUMBER = 64017; + /** extend .google.protobuf.MessageOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + marshaler = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int UNMARSHALER_FIELD_NUMBER = 64018; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + unmarshaler = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int STABLE_MARSHALER_FIELD_NUMBER = 64019; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + stableMarshaler = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int SIZER_FIELD_NUMBER = 64020; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + sizer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int UNSAFE_MARSHALER_FIELD_NUMBER = 64023; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + unsafeMarshaler = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int UNSAFE_UNMARSHALER_FIELD_NUMBER = 64024; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + unsafeUnmarshaler = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_EXTENSIONS_MAP_FIELD_NUMBER = 64025; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + goprotoExtensionsMap = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_UNRECOGNIZED_FIELD_NUMBER = 64026; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + goprotoUnrecognized = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int PROTOSIZER_FIELD_NUMBER = 64028; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + protosizer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int COMPARE_FIELD_NUMBER = 64029; + /** extend .google.protobuf.MessageOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + compare = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int TYPEDECL_FIELD_NUMBER = 64030; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + typedecl = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int MESSAGENAME_FIELD_NUMBER = 64033; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + messagename = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_SIZECACHE_FIELD_NUMBER = 64034; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + goprotoSizecache = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int GOPROTO_UNKEYED_FIELD_NUMBER = 64035; + /** extend .google.protobuf.MessageOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.MessageOptions, Boolean> + goprotoUnkeyed = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int NULLABLE_FIELD_NUMBER = 65001; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, Boolean> + nullable = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int EMBED_FIELD_NUMBER = 65002; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, Boolean> + embed = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int CUSTOMTYPE_FIELD_NUMBER = 65003; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + customtype = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int CUSTOMNAME_FIELD_NUMBER = 65004; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + customname = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int JSONTAG_FIELD_NUMBER = 65005; + /** extend .google.protobuf.FieldOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + jsontag = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int MORETAGS_FIELD_NUMBER = 65006; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + moretags = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int CASTTYPE_FIELD_NUMBER = 65007; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + casttype = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int CASTKEY_FIELD_NUMBER = 65008; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + castkey = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int CASTVALUE_FIELD_NUMBER = 65009; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, String> + castvalue = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + String.class, null); + + public static final int STDTIME_FIELD_NUMBER = 65010; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, Boolean> + stdtime = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int STDDURATION_FIELD_NUMBER = 65011; + /** extend .google.protobuf.FieldOptions { ... } */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, Boolean> + stdduration = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static final int WKTPOINTER_FIELD_NUMBER = 65012; + /** extend .google.protobuf.FieldOptions { ... 
} */ + public static final com.google.protobuf.GeneratedMessage.GeneratedExtension< + com.google.protobuf.DescriptorProtos.FieldOptions, Boolean> + wktpointer = + com.google.protobuf.GeneratedMessage.newFileScopedGeneratedExtension( + Boolean.class, null); + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + String[] descriptorData = { + "\n\ngogo.proto\022\tgogoproto\032 google/protobuf" + + "/descriptor.proto:;\n\023goproto_enum_prefix" + + "\022\034.google.protobuf.EnumOptions\030\261\344\003 \001(\010:=" + + "\n\025goproto_enum_stringer\022\034.google.protobu" + + "f.EnumOptions\030\305\344\003 \001(\010:5\n\renum_stringer\022\034" + + ".google.protobuf.EnumOptions\030\306\344\003 \001(\010:7\n\017" + + "enum_customname\022\034.google.protobuf.EnumOp" + + "tions\030\307\344\003 \001(\t:0\n\010enumdecl\022\034.google.proto" + + "buf.EnumOptions\030\310\344\003 \001(\010:A\n\024enumvalue_cus" + + "tomname\022!.google.protobuf.EnumValueOptio" + + "ns\030\321\203\004 \001(\t:;\n\023goproto_getters_all\022\034.goog" + + "le.protobuf.FileOptions\030\231\354\003 \001(\010:?\n\027gopro" + + "to_enum_prefix_all\022\034.google.protobuf.Fil" + + "eOptions\030\232\354\003 \001(\010:<\n\024goproto_stringer_all" + + "\022\034.google.protobuf.FileOptions\030\233\354\003 \001(\010:9" + + "\n\021verbose_equal_all\022\034.google.protobuf.Fi" + + "leOptions\030\234\354\003 \001(\010:0\n\010face_all\022\034.google.p" + + "rotobuf.FileOptions\030\235\354\003 \001(\010:4\n\014gostring_" + + "all\022\034.google.protobuf.FileOptions\030\236\354\003 \001(" + + "\010:4\n\014populate_all\022\034.google.protobuf.File" + + "Options\030\237\354\003 \001(\010:4\n\014stringer_all\022\034.google" + + ".protobuf.FileOptions\030\240\354\003 \001(\010:3\n\013onlyone" + + "_all\022\034.google.protobuf.FileOptions\030\241\354\003 \001" + + "(\010:1\n\tequal_all\022\034.google.protobuf.FileOp" + + "tions\030\245\354\003 \001(\010:7\n\017description_all\022\034.googl" + + "e.protobuf.FileOptions\030\246\354\003 \001(\010:3\n\013testge" + + "n_all\022\034.google.protobuf.FileOptions\030\247\354\003 " + + "\001(\010:4\n\014benchgen_all\022\034.google.protobuf.Fi" + + "leOptions\030\250\354\003 \001(\010:5\n\rmarshaler_all\022\034.goo" + + "gle.protobuf.FileOptions\030\251\354\003 \001(\010:7\n\017unma" + + "rshaler_all\022\034.google.protobuf.FileOption" + + "s\030\252\354\003 \001(\010:<\n\024stable_marshaler_all\022\034.goog" + + "le.protobuf.FileOptions\030\253\354\003 \001(\010:1\n\tsizer" + + "_all\022\034.google.protobuf.FileOptions\030\254\354\003 \001" + + "(\010:A\n\031goproto_enum_stringer_all\022\034.google" + + ".protobuf.FileOptions\030\255\354\003 \001(\010:9\n\021enum_st" + + "ringer_all\022\034.google.protobuf.FileOptions" + + "\030\256\354\003 \001(\010:<\n\024unsafe_marshaler_all\022\034.googl" + + "e.protobuf.FileOptions\030\257\354\003 \001(\010:>\n\026unsafe" + + "_unmarshaler_all\022\034.google.protobuf.FileO" + + "ptions\030\260\354\003 \001(\010:B\n\032goproto_extensions_map" + + "_all\022\034.google.protobuf.FileOptions\030\261\354\003 \001" + + "(\010:@\n\030goproto_unrecognized_all\022\034.google." 
+ + "protobuf.FileOptions\030\262\354\003 \001(\010:8\n\020gogoprot" + + "o_import\022\034.google.protobuf.FileOptions\030\263" + + "\354\003 \001(\010:6\n\016protosizer_all\022\034.google.protob" + + "uf.FileOptions\030\264\354\003 \001(\010:3\n\013compare_all\022\034." + + "google.protobuf.FileOptions\030\265\354\003 \001(\010:4\n\014t" + + "ypedecl_all\022\034.google.protobuf.FileOption" + + "s\030\266\354\003 \001(\010:4\n\014enumdecl_all\022\034.google.proto" + + "buf.FileOptions\030\267\354\003 \001(\010:<\n\024goproto_regis" + + "tration\022\034.google.protobuf.FileOptions\030\270\354" + + "\003 \001(\010:7\n\017messagename_all\022\034.google.protob" + + "uf.FileOptions\030\271\354\003 \001(\010:=\n\025goproto_sizeca" + + "che_all\022\034.google.protobuf.FileOptions\030\272\354" + + "\003 \001(\010:;\n\023goproto_unkeyed_all\022\034.google.pr" + + "otobuf.FileOptions\030\273\354\003 \001(\010::\n\017goproto_ge" + + "tters\022\037.google.protobuf.MessageOptions\030\201" + + "\364\003 \001(\010:;\n\020goproto_stringer\022\037.google.prot" + + "obuf.MessageOptions\030\203\364\003 \001(\010:8\n\rverbose_e" + + "qual\022\037.google.protobuf.MessageOptions\030\204\364" + + "\003 \001(\010:/\n\004face\022\037.google.protobuf.MessageO" + + "ptions\030\205\364\003 \001(\010:3\n\010gostring\022\037.google.prot" + + "obuf.MessageOptions\030\206\364\003 \001(\010:3\n\010populate\022" + + "\037.google.protobuf.MessageOptions\030\207\364\003 \001(\010" + + ":3\n\010stringer\022\037.google.protobuf.MessageOp" + + "tions\030\300\213\004 \001(\010:2\n\007onlyone\022\037.google.protob" + + "uf.MessageOptions\030\211\364\003 \001(\010:0\n\005equal\022\037.goo" + + "gle.protobuf.MessageOptions\030\215\364\003 \001(\010:6\n\013d" + + "escription\022\037.google.protobuf.MessageOpti" + + "ons\030\216\364\003 \001(\010:2\n\007testgen\022\037.google.protobuf" + + ".MessageOptions\030\217\364\003 \001(\010:3\n\010benchgen\022\037.go" + + "ogle.protobuf.MessageOptions\030\220\364\003 \001(\010:4\n\t" + + "marshaler\022\037.google.protobuf.MessageOptio" + + "ns\030\221\364\003 \001(\010:6\n\013unmarshaler\022\037.google.proto" + + "buf.MessageOptions\030\222\364\003 \001(\010:;\n\020stable_mar" + + "shaler\022\037.google.protobuf.MessageOptions\030" + + "\223\364\003 \001(\010:0\n\005sizer\022\037.google.protobuf.Messa" + + "geOptions\030\224\364\003 \001(\010:;\n\020unsafe_marshaler\022\037." + + "google.protobuf.MessageOptions\030\227\364\003 \001(\010:=" + + "\n\022unsafe_unmarshaler\022\037.google.protobuf.M" + + "essageOptions\030\230\364\003 \001(\010:A\n\026goproto_extensi" + + "ons_map\022\037.google.protobuf.MessageOptions" + + "\030\231\364\003 \001(\010:?\n\024goproto_unrecognized\022\037.googl" + + "e.protobuf.MessageOptions\030\232\364\003 \001(\010:5\n\npro" + + "tosizer\022\037.google.protobuf.MessageOptions" + + "\030\234\364\003 \001(\010:2\n\007compare\022\037.google.protobuf.Me" + + "ssageOptions\030\235\364\003 \001(\010:3\n\010typedecl\022\037.googl" + + "e.protobuf.MessageOptions\030\236\364\003 \001(\010:6\n\013mes" + + "sagename\022\037.google.protobuf.MessageOption" + + "s\030\241\364\003 \001(\010:<\n\021goproto_sizecache\022\037.google." 
+ + "protobuf.MessageOptions\030\242\364\003 \001(\010::\n\017gopro" + + "to_unkeyed\022\037.google.protobuf.MessageOpti" + + "ons\030\243\364\003 \001(\010:1\n\010nullable\022\035.google.protobu" + + "f.FieldOptions\030\351\373\003 \001(\010:.\n\005embed\022\035.google" + + ".protobuf.FieldOptions\030\352\373\003 \001(\010:3\n\ncustom" + + "type\022\035.google.protobuf.FieldOptions\030\353\373\003 " + + "\001(\t:3\n\ncustomname\022\035.google.protobuf.Fiel" + + "dOptions\030\354\373\003 \001(\t:0\n\007jsontag\022\035.google.pro" + + "tobuf.FieldOptions\030\355\373\003 \001(\t:1\n\010moretags\022\035" + + ".google.protobuf.FieldOptions\030\356\373\003 \001(\t:1\n" + + "\010casttype\022\035.google.protobuf.FieldOptions" + + "\030\357\373\003 \001(\t:0\n\007castkey\022\035.google.protobuf.Fi" + + "eldOptions\030\360\373\003 \001(\t:2\n\tcastvalue\022\035.google" + + ".protobuf.FieldOptions\030\361\373\003 \001(\t:0\n\007stdtim" + + "e\022\035.google.protobuf.FieldOptions\030\362\373\003 \001(\010" + + ":4\n\013stdduration\022\035.google.protobuf.FieldO" + + "ptions\030\363\373\003 \001(\010:3\n\nwktpointer\022\035.google.pr" + + "otobuf.FieldOptions\030\364\373\003 \001(\010BE\n\023com.googl" + + "e.protobufB\nGoGoProtosZ\"github.com/gogo/" + + "protobuf/gogoproto" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.DescriptorProtos.getDescriptor(), + }); + goprotoEnumPrefix.internalInit(descriptor.getExtensions().get(0)); + goprotoEnumStringer.internalInit(descriptor.getExtensions().get(1)); + enumStringer.internalInit(descriptor.getExtensions().get(2)); + enumCustomname.internalInit(descriptor.getExtensions().get(3)); + enumdecl.internalInit(descriptor.getExtensions().get(4)); + enumvalueCustomname.internalInit(descriptor.getExtensions().get(5)); + goprotoGettersAll.internalInit(descriptor.getExtensions().get(6)); + goprotoEnumPrefixAll.internalInit(descriptor.getExtensions().get(7)); + goprotoStringerAll.internalInit(descriptor.getExtensions().get(8)); + verboseEqualAll.internalInit(descriptor.getExtensions().get(9)); + faceAll.internalInit(descriptor.getExtensions().get(10)); + gostringAll.internalInit(descriptor.getExtensions().get(11)); + populateAll.internalInit(descriptor.getExtensions().get(12)); + stringerAll.internalInit(descriptor.getExtensions().get(13)); + onlyoneAll.internalInit(descriptor.getExtensions().get(14)); + equalAll.internalInit(descriptor.getExtensions().get(15)); + descriptionAll.internalInit(descriptor.getExtensions().get(16)); + testgenAll.internalInit(descriptor.getExtensions().get(17)); + benchgenAll.internalInit(descriptor.getExtensions().get(18)); + marshalerAll.internalInit(descriptor.getExtensions().get(19)); + unmarshalerAll.internalInit(descriptor.getExtensions().get(20)); + stableMarshalerAll.internalInit(descriptor.getExtensions().get(21)); + sizerAll.internalInit(descriptor.getExtensions().get(22)); + goprotoEnumStringerAll.internalInit(descriptor.getExtensions().get(23)); + enumStringerAll.internalInit(descriptor.getExtensions().get(24)); + unsafeMarshalerAll.internalInit(descriptor.getExtensions().get(25)); + unsafeUnmarshalerAll.internalInit(descriptor.getExtensions().get(26)); + goprotoExtensionsMapAll.internalInit(descriptor.getExtensions().get(27)); + goprotoUnrecognizedAll.internalInit(descriptor.getExtensions().get(28)); + gogoprotoImport.internalInit(descriptor.getExtensions().get(29)); + 
protosizerAll.internalInit(descriptor.getExtensions().get(30)); + compareAll.internalInit(descriptor.getExtensions().get(31)); + typedeclAll.internalInit(descriptor.getExtensions().get(32)); + enumdeclAll.internalInit(descriptor.getExtensions().get(33)); + goprotoRegistration.internalInit(descriptor.getExtensions().get(34)); + messagenameAll.internalInit(descriptor.getExtensions().get(35)); + goprotoSizecacheAll.internalInit(descriptor.getExtensions().get(36)); + goprotoUnkeyedAll.internalInit(descriptor.getExtensions().get(37)); + goprotoGetters.internalInit(descriptor.getExtensions().get(38)); + goprotoStringer.internalInit(descriptor.getExtensions().get(39)); + verboseEqual.internalInit(descriptor.getExtensions().get(40)); + face.internalInit(descriptor.getExtensions().get(41)); + gostring.internalInit(descriptor.getExtensions().get(42)); + populate.internalInit(descriptor.getExtensions().get(43)); + stringer.internalInit(descriptor.getExtensions().get(44)); + onlyone.internalInit(descriptor.getExtensions().get(45)); + equal.internalInit(descriptor.getExtensions().get(46)); + description.internalInit(descriptor.getExtensions().get(47)); + testgen.internalInit(descriptor.getExtensions().get(48)); + benchgen.internalInit(descriptor.getExtensions().get(49)); + marshaler.internalInit(descriptor.getExtensions().get(50)); + unmarshaler.internalInit(descriptor.getExtensions().get(51)); + stableMarshaler.internalInit(descriptor.getExtensions().get(52)); + sizer.internalInit(descriptor.getExtensions().get(53)); + unsafeMarshaler.internalInit(descriptor.getExtensions().get(54)); + unsafeUnmarshaler.internalInit(descriptor.getExtensions().get(55)); + goprotoExtensionsMap.internalInit(descriptor.getExtensions().get(56)); + goprotoUnrecognized.internalInit(descriptor.getExtensions().get(57)); + protosizer.internalInit(descriptor.getExtensions().get(58)); + compare.internalInit(descriptor.getExtensions().get(59)); + typedecl.internalInit(descriptor.getExtensions().get(60)); + messagename.internalInit(descriptor.getExtensions().get(61)); + goprotoSizecache.internalInit(descriptor.getExtensions().get(62)); + goprotoUnkeyed.internalInit(descriptor.getExtensions().get(63)); + nullable.internalInit(descriptor.getExtensions().get(64)); + embed.internalInit(descriptor.getExtensions().get(65)); + customtype.internalInit(descriptor.getExtensions().get(66)); + customname.internalInit(descriptor.getExtensions().get(67)); + jsontag.internalInit(descriptor.getExtensions().get(68)); + moretags.internalInit(descriptor.getExtensions().get(69)); + casttype.internalInit(descriptor.getExtensions().get(70)); + castkey.internalInit(descriptor.getExtensions().get(71)); + castvalue.internalInit(descriptor.getExtensions().get(72)); + stdtime.internalInit(descriptor.getExtensions().get(73)); + stdduration.internalInit(descriptor.getExtensions().get(74)); + wktpointer.internalInit(descriptor.getExtensions().get(75)); + com.google.protobuf.DescriptorProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/Remote.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/Remote.java new file mode 100644 index 00000000000..17a67d4446b --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/Remote.java @@ -0,0 
+1,6998 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto; + +public final class Remote { + private Remote() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface WriteRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.WriteRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getTimeseriesList(); + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + Types.TimeSeries getTimeseries(int index); + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + int getTimeseriesCount(); + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getTimeseriesOrBuilderList(); + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + Types.TimeSeriesOrBuilder getTimeseriesOrBuilder(int index); + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getMetadataList(); + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + Types.MetricMetadata getMetadata(int index); + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + int getMetadataCount(); + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getMetadataOrBuilderList(); + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + Types.MetricMetadataOrBuilder getMetadataOrBuilder(int index); + } + + /** Protobuf type {@code prometheus.WriteRequest} */ + public static final class WriteRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.WriteRequest) + WriteRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use WriteRequest.newBuilder() to construct. 
+ private WriteRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private WriteRequest() { + timeseries_ = java.util.Collections.emptyList(); + metadata_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new WriteRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_WriteRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_WriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.WriteRequest.class, Remote.WriteRequest.Builder.class); + } + + public static final int TIMESERIES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List timeseries_; + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getTimeseriesList() { + return timeseries_; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getTimeseriesOrBuilderList() { + return timeseries_; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + @Override + public int getTimeseriesCount() { + return timeseries_.size(); + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.TimeSeries getTimeseries(int index) { + return timeseries_.get(index); + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.TimeSeriesOrBuilder getTimeseriesOrBuilder(int index) { + return timeseries_.get(index); + } + + public static final int METADATA_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List metadata_; + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getMetadataList() { + return metadata_; + } + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getMetadataOrBuilderList() { + return metadata_; + } + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + @Override + public int getMetadataCount() { + return metadata_.size(); + } + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.MetricMetadata getMetadata(int index) { + return metadata_.get(index); + } + + /** + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.MetricMetadataOrBuilder getMetadataOrBuilder(int index) { + return metadata_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < timeseries_.size(); i++) { + output.writeMessage(1, timeseries_.get(i)); + } + for (int i = 0; i < 
metadata_.size(); i++) { + output.writeMessage(3, metadata_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < timeseries_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, timeseries_.get(i)); + } + for (int i = 0; i < metadata_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, metadata_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Remote.WriteRequest)) { + return super.equals(obj); + } + Remote.WriteRequest other = (Remote.WriteRequest) obj; + + if (!getTimeseriesList().equals(other.getTimeseriesList())) { + return false; + } + if (!getMetadataList().equals(other.getMetadataList())) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getTimeseriesCount() > 0) { + hash = (37 * hash) + TIMESERIES_FIELD_NUMBER; + hash = (53 * hash) + getTimeseriesList().hashCode(); + } + if (getMetadataCount() > 0) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + getMetadataList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Remote.WriteRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.WriteRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.WriteRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.WriteRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.WriteRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.WriteRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.WriteRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.WriteRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.WriteRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException 
{ + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Remote.WriteRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.WriteRequest parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.WriteRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Remote.WriteRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.WriteRequest} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.WriteRequest) + Remote.WriteRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_WriteRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_WriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.WriteRequest.class, Remote.WriteRequest.Builder.class); + } + + // Construct using Remote.WriteRequest.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (timeseriesBuilder_ == null) { + timeseries_ = java.util.Collections.emptyList(); + } else { + timeseries_ = null; + timeseriesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (metadataBuilder_ == null) { + metadata_ = java.util.Collections.emptyList(); + } else { + metadata_ = null; + metadataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Remote.internal_static_prometheus_WriteRequest_descriptor; + } + + @Override + public Remote.WriteRequest getDefaultInstanceForType() { + return Remote.WriteRequest.getDefaultInstance(); + } + + @Override + public Remote.WriteRequest build() { + Remote.WriteRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Remote.WriteRequest buildPartial() { + Remote.WriteRequest result = new Remote.WriteRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private 
void buildPartialRepeatedFields(Remote.WriteRequest result) { + if (timeseriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + timeseries_ = java.util.Collections.unmodifiableList(timeseries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.timeseries_ = timeseries_; + } else { + result.timeseries_ = timeseriesBuilder_.build(); + } + if (metadataBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + metadata_ = java.util.Collections.unmodifiableList(metadata_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.metadata_ = metadata_; + } else { + result.metadata_ = metadataBuilder_.build(); + } + } + + private void buildPartial0(Remote.WriteRequest result) { + int from_bitField0_ = bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Remote.WriteRequest) { + return mergeFrom((Remote.WriteRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Remote.WriteRequest other) { + if (other == Remote.WriteRequest.getDefaultInstance()) { + return this; + } + if (timeseriesBuilder_ == null) { + if (!other.timeseries_.isEmpty()) { + if (timeseries_.isEmpty()) { + timeseries_ = other.timeseries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTimeseriesIsMutable(); + timeseries_.addAll(other.timeseries_); + } + onChanged(); + } + } else { + if (!other.timeseries_.isEmpty()) { + if (timeseriesBuilder_.isEmpty()) { + timeseriesBuilder_.dispose(); + timeseriesBuilder_ = null; + timeseries_ = other.timeseries_; + bitField0_ = (bitField0_ & ~0x00000001); + timeseriesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getTimeseriesFieldBuilder() + : null; + } else { + timeseriesBuilder_.addAllMessages(other.timeseries_); + } + } + } + if (metadataBuilder_ == null) { + if (!other.metadata_.isEmpty()) { + if (metadata_.isEmpty()) { + metadata_ = other.metadata_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureMetadataIsMutable(); + metadata_.addAll(other.metadata_); + } + onChanged(); + } + } else { + if (!other.metadata_.isEmpty()) { + if (metadataBuilder_.isEmpty()) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + metadata_ = other.metadata_; + bitField0_ = (bitField0_ & ~0x00000002); + metadataBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getMetadataFieldBuilder() + : null; + } else { + metadataBuilder_.addAllMessages(other.metadata_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Types.TimeSeries m = + input.readMessage( + Types.TimeSeries.parser(), extensionRegistry); + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.add(m); + } else { + timeseriesBuilder_.addMessage(m); + } + break; + } // case 10 + case 26: + { + Types.MetricMetadata m = + input.readMessage( + Types.MetricMetadata.parser(), + extensionRegistry); + if (metadataBuilder_ == null) { + ensureMetadataIsMutable(); + metadata_.add(m); + } else { + metadataBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List timeseries_ = + java.util.Collections.emptyList(); + + private void ensureTimeseriesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + timeseries_ = new java.util.ArrayList(timeseries_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.TimeSeries, Types.TimeSeries.Builder, Types.TimeSeriesOrBuilder> + timeseriesBuilder_; + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getTimeseriesList() { + if (timeseriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(timeseries_); + } else { + return timeseriesBuilder_.getMessageList(); + } + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public int getTimeseriesCount() { + if (timeseriesBuilder_ == null) { + return timeseries_.size(); + } else { + return timeseriesBuilder_.getCount(); + } + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Types.TimeSeries getTimeseries(int index) { + if (timeseriesBuilder_ == null) { + return timeseries_.get(index); + } else { + return timeseriesBuilder_.getMessage(index); + } + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder setTimeseries(int index, Types.TimeSeries value) { + if (timeseriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTimeseriesIsMutable(); + timeseries_.set(index, value); + onChanged(); + } else { + timeseriesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder setTimeseries(int index, Types.TimeSeries.Builder builderForValue) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + 
timeseries_.set(index, builderForValue.build()); + onChanged(); + } else { + timeseriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder addTimeseries(Types.TimeSeries value) { + if (timeseriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTimeseriesIsMutable(); + timeseries_.add(value); + onChanged(); + } else { + timeseriesBuilder_.addMessage(value); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder addTimeseries(int index, Types.TimeSeries value) { + if (timeseriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTimeseriesIsMutable(); + timeseries_.add(index, value); + onChanged(); + } else { + timeseriesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder addTimeseries(Types.TimeSeries.Builder builderForValue) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.add(builderForValue.build()); + onChanged(); + } else { + timeseriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder addTimeseries(int index, Types.TimeSeries.Builder builderForValue) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.add(index, builderForValue.build()); + onChanged(); + } else { + timeseriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder addAllTimeseries(Iterable values) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, timeseries_); + onChanged(); + } else { + timeseriesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder clearTimeseries() { + if (timeseriesBuilder_ == null) { + timeseries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + timeseriesBuilder_.clear(); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Builder removeTimeseries(int index) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.remove(index); + onChanged(); + } else { + timeseriesBuilder_.remove(index); + } + return this; + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Types.TimeSeries.Builder getTimeseriesBuilder(int index) { + return getTimeseriesFieldBuilder().getBuilder(index); + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Types.TimeSeriesOrBuilder getTimeseriesOrBuilder(int index) { + if (timeseriesBuilder_ == null) { + return timeseries_.get(index); + } else { + return timeseriesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List + 
getTimeseriesOrBuilderList() { + if (timeseriesBuilder_ != null) { + return timeseriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(timeseries_); + } + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Types.TimeSeries.Builder addTimeseriesBuilder() { + return getTimeseriesFieldBuilder() + .addBuilder(Types.TimeSeries.getDefaultInstance()); + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public Types.TimeSeries.Builder addTimeseriesBuilder(int index) { + return getTimeseriesFieldBuilder() + .addBuilder(index, Types.TimeSeries.getDefaultInstance()); + } + + /** + * repeated .prometheus.TimeSeries timeseries = 1 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getTimeseriesBuilderList() { + return getTimeseriesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.TimeSeries, Types.TimeSeries.Builder, Types.TimeSeriesOrBuilder> + getTimeseriesFieldBuilder() { + if (timeseriesBuilder_ == null) { + timeseriesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.TimeSeries, + Types.TimeSeries.Builder, + Types.TimeSeriesOrBuilder>( + timeseries_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + timeseries_ = null; + } + return timeseriesBuilder_; + } + + private java.util.List metadata_ = + java.util.Collections.emptyList(); + + private void ensureMetadataIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + metadata_ = new java.util.ArrayList(metadata_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.MetricMetadata, + Types.MetricMetadata.Builder, + Types.MetricMetadataOrBuilder> + metadataBuilder_; + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getMetadataList() { + if (metadataBuilder_ == null) { + return java.util.Collections.unmodifiableList(metadata_); + } else { + return metadataBuilder_.getMessageList(); + } + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public int getMetadataCount() { + if (metadataBuilder_ == null) { + return metadata_.size(); + } else { + return metadataBuilder_.getCount(); + } + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.MetricMetadata getMetadata(int index) { + if (metadataBuilder_ == null) { + return metadata_.get(index); + } else { + return metadataBuilder_.getMessage(index); + } + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder setMetadata(int index, Types.MetricMetadata value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetadataIsMutable(); + metadata_.set(index, value); + onChanged(); + } else { + metadataBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder setMetadata(int index, Types.MetricMetadata.Builder builderForValue) { + if (metadataBuilder_ == null) { + ensureMetadataIsMutable(); + metadata_.set(index, builderForValue.build()); + onChanged(); + } else { + metadataBuilder_.setMessage(index, builderForValue.build()); + } + return 
this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addMetadata(Types.MetricMetadata value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetadataIsMutable(); + metadata_.add(value); + onChanged(); + } else { + metadataBuilder_.addMessage(value); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addMetadata(int index, Types.MetricMetadata value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetadataIsMutable(); + metadata_.add(index, value); + onChanged(); + } else { + metadataBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addMetadata(Types.MetricMetadata.Builder builderForValue) { + if (metadataBuilder_ == null) { + ensureMetadataIsMutable(); + metadata_.add(builderForValue.build()); + onChanged(); + } else { + metadataBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addMetadata(int index, Types.MetricMetadata.Builder builderForValue) { + if (metadataBuilder_ == null) { + ensureMetadataIsMutable(); + metadata_.add(index, builderForValue.build()); + onChanged(); + } else { + metadataBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addAllMetadata(Iterable values) { + if (metadataBuilder_ == null) { + ensureMetadataIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, metadata_); + onChanged(); + } else { + metadataBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder clearMetadata() { + if (metadataBuilder_ == null) { + metadata_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + metadataBuilder_.clear(); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder removeMetadata(int index) { + if (metadataBuilder_ == null) { + ensureMetadataIsMutable(); + metadata_.remove(index); + onChanged(); + } else { + metadataBuilder_.remove(index); + } + return this; + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.MetricMetadata.Builder getMetadataBuilder(int index) { + return getMetadataFieldBuilder().getBuilder(index); + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.MetricMetadataOrBuilder getMetadataOrBuilder(int index) { + if (metadataBuilder_ == null) { + return metadata_.get(index); + } else { + return metadataBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List + getMetadataOrBuilderList() { + if (metadataBuilder_ != null) { + return metadataBuilder_.getMessageOrBuilderList(); + } else { + return 
java.util.Collections.unmodifiableList(metadata_); + } + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.MetricMetadata.Builder addMetadataBuilder() { + return getMetadataFieldBuilder() + .addBuilder(Types.MetricMetadata.getDefaultInstance()); + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.MetricMetadata.Builder addMetadataBuilder(int index) { + return getMetadataFieldBuilder() + .addBuilder(index, Types.MetricMetadata.getDefaultInstance()); + } + + /** + * + * repeated .prometheus.MetricMetadata metadata = 3 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getMetadataBuilderList() { + return getMetadataFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.MetricMetadata, + Types.MetricMetadata.Builder, + Types.MetricMetadataOrBuilder> + getMetadataFieldBuilder() { + if (metadataBuilder_ == null) { + metadataBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.MetricMetadata, + Types.MetricMetadata.Builder, + Types.MetricMetadataOrBuilder>( + metadata_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + metadata_ = null; + } + return metadataBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.WriteRequest) + } + + // @@protoc_insertion_point(class_scope:prometheus.WriteRequest) + private static final Remote.WriteRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Remote.WriteRequest(); + } + + public static Remote.WriteRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public WriteRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Remote.WriteRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ReadRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.ReadRequest) + com.google.protobuf.MessageOrBuilder { + + /** repeated .prometheus.Query queries = 1; */ + java.util.List getQueriesList(); + + /** repeated .prometheus.Query queries = 1; */ + Remote.Query getQueries(int index); + + 
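+        // Illustrative sketch (not part of the protoc output): building and
+        // serializing the WriteRequest generated above. `series` is a hypothetical
+        // Types.TimeSeries assumed to be populated with labels and samples elsewhere.
+        //
+        //   Types.TimeSeries series = ...;
+        //   Remote.WriteRequest request =
+        //           Remote.WriteRequest.newBuilder().addTimeseries(series).build();
+        //   byte[] body = request.toByteArray();
+        //   // Prometheus remote-write endpoints expect the body snappy-compressed.
+        //   Remote.WriteRequest roundTrip = Remote.WriteRequest.parseFrom(body);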
/** repeated .prometheus.Query queries = 1; */ + int getQueriesCount(); + + /** repeated .prometheus.Query queries = 1; */ + java.util.List getQueriesOrBuilderList(); + + /** repeated .prometheus.Query queries = 1; */ + Remote.QueryOrBuilder getQueriesOrBuilder(int index); + + /** + * + * + *

+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @return A list containing the acceptedResponseTypes. + */ + java.util.List getAcceptedResponseTypesList(); + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @return The count of acceptedResponseTypes. + */ + int getAcceptedResponseTypesCount(); + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @param index The index of the element to return. + * @return The acceptedResponseTypes at the given index. + */ + Remote.ReadRequest.ResponseType getAcceptedResponseTypes(int index); + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @return A list containing the enum numeric values on the wire for acceptedResponseTypes. + */ + java.util.List getAcceptedResponseTypesValueList(); + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @param index The index of the value to return. + * @return The enum numeric value on the wire of acceptedResponseTypes at the given index. + */ + int getAcceptedResponseTypesValue(int index); + } + + /** + * + * + *
+     * ReadRequest represents a remote read request.
+     * 
+ * + *

Protobuf type {@code prometheus.ReadRequest} + */ + public static final class ReadRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.ReadRequest) + ReadRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadRequest.newBuilder() to construct. + private ReadRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRequest() { + queries_ = java.util.Collections.emptyList(); + acceptedResponseTypes_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ReadRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_ReadRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_ReadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.ReadRequest.class, Remote.ReadRequest.Builder.class); + } + + /** Protobuf enum {@code prometheus.ReadRequest.ResponseType} */ + public enum ResponseType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *

+             * Server will return a single ReadResponse message with matched series that includes a list of raw samples.
+             * It's recommended to use streamed response types instead.
+             *
+             * Response headers:
+             * Content-Type: "application/x-protobuf"
+             * Content-Encoding: "snappy"
+             * 
+ * + * SAMPLES = 0; + */ + SAMPLES(0), + /** + * + * + *
+             * Server will stream a delimited ChunkedReadResponse message that
+             * contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+             * Each message follows a varint-encoded size and a fixed-size big-endian
+             * uint32 CRC32 Castagnoli checksum.
+             *
+             * Response headers:
+             * Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
+             * Content-Encoding: ""
+             * 
+ * + * STREAMED_XOR_CHUNKS = 1; + */ + STREAMED_XOR_CHUNKS(1), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+             * Server will return a single ReadResponse message with matched series that includes a list of raw samples.
+             * It's recommended to use streamed response types instead.
+             *
+             * Response headers:
+             * Content-Type: "application/x-protobuf"
+             * Content-Encoding: "snappy"
+             * 
+ * + * SAMPLES = 0; + */ + public static final int SAMPLES_VALUE = 0; + /** + * + * + *
+             * Server will stream a delimited ChunkedReadResponse message that
+             * contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+             * Each message follows a varint-encoded size and a fixed-size big-endian
+             * uint32 CRC32 Castagnoli checksum.
+             *
+             * Response headers:
+             * Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
+             * Content-Encoding: ""
+             * 
+ * + * STREAMED_XOR_CHUNKS = 1; + */ + public static final int STREAMED_XOR_CHUNKS_VALUE = 1; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static ResponseType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ResponseType forNumber(int value) { + switch (value) { + case 0: + return SAMPLES; + case 1: + return STREAMED_XOR_CHUNKS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ResponseType findValueByNumber(int number) { + return ResponseType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return Remote.ReadRequest.getDescriptor().getEnumTypes().get(0); + } + + private static final ResponseType[] VALUES = values(); + + public static ResponseType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ResponseType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:prometheus.ReadRequest.ResponseType) + } + + public static final int QUERIES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List queries_; + + /** repeated .prometheus.Query queries = 1; */ + @Override + public java.util.List getQueriesList() { + return queries_; + } + + /** repeated .prometheus.Query queries = 1; */ + @Override + public java.util.List getQueriesOrBuilderList() { + return queries_; + } + + /** repeated .prometheus.Query queries = 1; */ + @Override + public int getQueriesCount() { + return queries_.size(); + } + + /** repeated .prometheus.Query queries = 1; */ + @Override + public Remote.Query getQueries(int index) { + return queries_.get(index); + } + + /** repeated .prometheus.Query queries = 1; */ + @Override + public Remote.QueryOrBuilder getQueriesOrBuilder(int index) { + return queries_.get(index); + } + + public static final int ACCEPTED_RESPONSE_TYPES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List acceptedResponseTypes_; + + private static final com.google.protobuf.Internal.ListAdapter.Converter< + Integer, Remote.ReadRequest.ResponseType> + acceptedResponseTypes_converter_ = + new 
com.google.protobuf.Internal.ListAdapter.Converter< + Integer, Remote.ReadRequest.ResponseType>() { + public Remote.ReadRequest.ResponseType convert(Integer from) { + Remote.ReadRequest.ResponseType result = + Remote.ReadRequest.ResponseType.forNumber(from); + return result == null + ? Remote.ReadRequest.ResponseType.UNRECOGNIZED + : result; + } + }; + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @return A list containing the acceptedResponseTypes. + */ + @Override + public java.util.List getAcceptedResponseTypesList() { + return new com.google.protobuf.Internal.ListAdapter< + Integer, Remote.ReadRequest.ResponseType>( + acceptedResponseTypes_, acceptedResponseTypes_converter_); + } + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @return The count of acceptedResponseTypes. + */ + @Override + public int getAcceptedResponseTypesCount() { + return acceptedResponseTypes_.size(); + } + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @param index The index of the element to return. + * @return The acceptedResponseTypes at the given index. + */ + @Override + public Remote.ReadRequest.ResponseType getAcceptedResponseTypes(int index) { + return acceptedResponseTypes_converter_.convert(acceptedResponseTypes_.get(index)); + } + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @return A list containing the enum numeric values on the wire for acceptedResponseTypes. + */ + @Override + public java.util.List getAcceptedResponseTypesValueList() { + return acceptedResponseTypes_; + } + + /** + * + * + *
+         * accepted_response_types allows negotiating the content type of the response.
+         *
+         * Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+         * implemented by the server, an error is returned.
+         * For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+         * 
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * @param index The index of the value to return. + * @return The enum numeric value on the wire of acceptedResponseTypes at the given index. + */ + @Override + public int getAcceptedResponseTypesValue(int index) { + return acceptedResponseTypes_.get(index); + } + + private int acceptedResponseTypesMemoizedSerializedSize; + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < queries_.size(); i++) { + output.writeMessage(1, queries_.get(i)); + } + if (getAcceptedResponseTypesList().size() > 0) { + output.writeUInt32NoTag(18); + output.writeUInt32NoTag(acceptedResponseTypesMemoizedSerializedSize); + } + for (int i = 0; i < acceptedResponseTypes_.size(); i++) { + output.writeEnumNoTag(acceptedResponseTypes_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < queries_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, queries_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < acceptedResponseTypes_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeEnumSizeNoTag( + acceptedResponseTypes_.get(i)); + } + size += dataSize; + if (!getAcceptedResponseTypesList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(dataSize); + } + acceptedResponseTypesMemoizedSerializedSize = dataSize; + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Remote.ReadRequest)) { + return super.equals(obj); + } + Remote.ReadRequest other = (Remote.ReadRequest) obj; + + if (!getQueriesList().equals(other.getQueriesList())) { + return false; + } + if (!acceptedResponseTypes_.equals(other.acceptedResponseTypes_)) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getQueriesCount() > 0) { + hash = (37 * hash) + QUERIES_FIELD_NUMBER; + hash = (53 * hash) + getQueriesList().hashCode(); + } + if (getAcceptedResponseTypesCount() > 0) { + hash = (37 * hash) + ACCEPTED_RESPONSE_TYPES_FIELD_NUMBER; + hash = (53 * hash) + acceptedResponseTypes_.hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Remote.ReadRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ReadRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
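+        // Illustrative sketch (not part of the protoc output): a ReadRequest that
+        // negotiates the response type as described in the comments above. The empty
+        // Remote.Query is a placeholder; matchers and a time range would be set on it.
+        //
+        //   Remote.ReadRequest read = Remote.ReadRequest.newBuilder()
+        //           .addQueries(Remote.Query.newBuilder().build())
+        //           .addAcceptedResponseTypes(
+        //                   Remote.ReadRequest.ResponseType.STREAMED_XOR_CHUNKS)
+        //           .addAcceptedResponseTypes(Remote.ReadRequest.ResponseType.SAMPLES)
+        //           .build();
+        //   // The server uses the first type it implements; SAMPLES is the default
+        //   // when the list is absent.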
public static Remote.ReadRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ReadRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ReadRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ReadRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ReadRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.ReadRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.ReadRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Remote.ReadRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.ReadRequest parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.ReadRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Remote.ReadRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * ReadRequest represents a remote read request.
+         * 
+ * + *

Protobuf type {@code prometheus.ReadRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.ReadRequest) + Remote.ReadRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_ReadRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_ReadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.ReadRequest.class, Remote.ReadRequest.Builder.class); + } + + // Construct using Remote.ReadRequest.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (queriesBuilder_ == null) { + queries_ = java.util.Collections.emptyList(); + } else { + queries_ = null; + queriesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + acceptedResponseTypes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Remote.internal_static_prometheus_ReadRequest_descriptor; + } + + @Override + public Remote.ReadRequest getDefaultInstanceForType() { + return Remote.ReadRequest.getDefaultInstance(); + } + + @Override + public Remote.ReadRequest build() { + Remote.ReadRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Remote.ReadRequest buildPartial() { + Remote.ReadRequest result = new Remote.ReadRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Remote.ReadRequest result) { + if (queriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + queries_ = java.util.Collections.unmodifiableList(queries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.queries_ = queries_; + } else { + result.queries_ = queriesBuilder_.build(); + } + if (((bitField0_ & 0x00000002) != 0)) { + acceptedResponseTypes_ = + java.util.Collections.unmodifiableList(acceptedResponseTypes_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.acceptedResponseTypes_ = acceptedResponseTypes_; + } + + private void buildPartial0(Remote.ReadRequest result) { + int from_bitField0_ = bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Remote.ReadRequest) { + return mergeFrom((Remote.ReadRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Remote.ReadRequest other) { + if (other == Remote.ReadRequest.getDefaultInstance()) { + return this; + } + if (queriesBuilder_ == null) { + if (!other.queries_.isEmpty()) { + if (queries_.isEmpty()) { + queries_ = other.queries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureQueriesIsMutable(); + queries_.addAll(other.queries_); + } + onChanged(); + } + } else { + if (!other.queries_.isEmpty()) { + if (queriesBuilder_.isEmpty()) { + queriesBuilder_.dispose(); + queriesBuilder_ = null; + queries_ = other.queries_; + bitField0_ = (bitField0_ & ~0x00000001); + queriesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getQueriesFieldBuilder() + : null; + } else { + queriesBuilder_.addAllMessages(other.queries_); + } + } + } + if (!other.acceptedResponseTypes_.isEmpty()) { + if (acceptedResponseTypes_.isEmpty()) { + acceptedResponseTypes_ = other.acceptedResponseTypes_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.addAll(other.acceptedResponseTypes_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Remote.Query m = + input.readMessage( + Remote.Query.parser(), extensionRegistry); + if (queriesBuilder_ == null) { + ensureQueriesIsMutable(); + queries_.add(m); + } else { + queriesBuilder_.addMessage(m); + } + break; + } // case 10 + case 16: + { + int tmpRaw = input.readEnum(); + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.add(tmpRaw); + break; + } // case 16 + case 18: + { + int length = input.readRawVarint32(); + int oldLimit = input.pushLimit(length); + while (input.getBytesUntilLimit() > 0) { + int tmpRaw = input.readEnum(); + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.add(tmpRaw); + } + input.popLimit(oldLimit); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List queries_ = java.util.Collections.emptyList(); + + private void ensureQueriesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + queries_ = new java.util.ArrayList(queries_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Remote.Query, Remote.Query.Builder, Remote.QueryOrBuilder> + queriesBuilder_; + + /** repeated .prometheus.Query queries = 1; */ + public java.util.List getQueriesList() { + if (queriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(queries_); + } else { + 
return queriesBuilder_.getMessageList(); + } + } + + /** repeated .prometheus.Query queries = 1; */ + public int getQueriesCount() { + if (queriesBuilder_ == null) { + return queries_.size(); + } else { + return queriesBuilder_.getCount(); + } + } + + /** repeated .prometheus.Query queries = 1; */ + public Remote.Query getQueries(int index) { + if (queriesBuilder_ == null) { + return queries_.get(index); + } else { + return queriesBuilder_.getMessage(index); + } + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder setQueries(int index, Remote.Query value) { + if (queriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueriesIsMutable(); + queries_.set(index, value); + onChanged(); + } else { + queriesBuilder_.setMessage(index, value); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder setQueries(int index, Remote.Query.Builder builderForValue) { + if (queriesBuilder_ == null) { + ensureQueriesIsMutable(); + queries_.set(index, builderForValue.build()); + onChanged(); + } else { + queriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder addQueries(Remote.Query value) { + if (queriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueriesIsMutable(); + queries_.add(value); + onChanged(); + } else { + queriesBuilder_.addMessage(value); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder addQueries(int index, Remote.Query value) { + if (queriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureQueriesIsMutable(); + queries_.add(index, value); + onChanged(); + } else { + queriesBuilder_.addMessage(index, value); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder addQueries(Remote.Query.Builder builderForValue) { + if (queriesBuilder_ == null) { + ensureQueriesIsMutable(); + queries_.add(builderForValue.build()); + onChanged(); + } else { + queriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder addQueries(int index, Remote.Query.Builder builderForValue) { + if (queriesBuilder_ == null) { + ensureQueriesIsMutable(); + queries_.add(index, builderForValue.build()); + onChanged(); + } else { + queriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder addAllQueries(Iterable values) { + if (queriesBuilder_ == null) { + ensureQueriesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, queries_); + onChanged(); + } else { + queriesBuilder_.addAllMessages(values); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder clearQueries() { + if (queriesBuilder_ == null) { + queries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + queriesBuilder_.clear(); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Builder removeQueries(int index) { + if (queriesBuilder_ == null) { + ensureQueriesIsMutable(); + queries_.remove(index); + onChanged(); + } else { + queriesBuilder_.remove(index); + } + return this; + } + + /** repeated .prometheus.Query queries = 1; */ + public Remote.Query.Builder getQueriesBuilder(int index) { + return 
getQueriesFieldBuilder().getBuilder(index); + } + + /** repeated .prometheus.Query queries = 1; */ + public Remote.QueryOrBuilder getQueriesOrBuilder(int index) { + if (queriesBuilder_ == null) { + return queries_.get(index); + } else { + return queriesBuilder_.getMessageOrBuilder(index); + } + } + + /** repeated .prometheus.Query queries = 1; */ + public java.util.List getQueriesOrBuilderList() { + if (queriesBuilder_ != null) { + return queriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(queries_); + } + } + + /** repeated .prometheus.Query queries = 1; */ + public Remote.Query.Builder addQueriesBuilder() { + return getQueriesFieldBuilder().addBuilder(Remote.Query.getDefaultInstance()); + } + + /** repeated .prometheus.Query queries = 1; */ + public Remote.Query.Builder addQueriesBuilder(int index) { + return getQueriesFieldBuilder() + .addBuilder(index, Remote.Query.getDefaultInstance()); + } + + /** repeated .prometheus.Query queries = 1; */ + public java.util.List getQueriesBuilderList() { + return getQueriesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Remote.Query, Remote.Query.Builder, Remote.QueryOrBuilder> + getQueriesFieldBuilder() { + if (queriesBuilder_ == null) { + queriesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Remote.Query, Remote.Query.Builder, Remote.QueryOrBuilder>( + queries_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + queries_ = null; + } + return queriesBuilder_; + } + + private java.util.List acceptedResponseTypes_ = + java.util.Collections.emptyList(); + + private void ensureAcceptedResponseTypesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + acceptedResponseTypes_ = + new java.util.ArrayList(acceptedResponseTypes_); + bitField0_ |= 0x00000002; + } + } + + /** + * + * + *
+             * <pre>
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
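+             *
+             * <p>Hand-written sketch (illustrative only; {@code requestBuilder}
+             * and the STREAMED_XOR_CHUNKS constant are assumed from the proto's
+             * usual codegen): callers list types in preference order, with
+             * SAMPLES as the fallback since it is the default when the field is
+             * absent:
+             * <pre>{@code
+             * requestBuilder
+             *         .addAcceptedResponseTypes(
+             *                 Remote.ReadRequest.ResponseType.STREAMED_XOR_CHUNKS)
+             *         .addAcceptedResponseTypes(Remote.ReadRequest.ResponseType.SAMPLES);
+             * }</pre>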
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @return A list containing the acceptedResponseTypes. + */ + public java.util.List getAcceptedResponseTypesList() { + return new com.google.protobuf.Internal.ListAdapter< + Integer, Remote.ReadRequest.ResponseType>( + acceptedResponseTypes_, acceptedResponseTypes_converter_); + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @return The count of acceptedResponseTypes. + */ + public int getAcceptedResponseTypesCount() { + return acceptedResponseTypes_.size(); + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param index The index of the element to return. + * @return The acceptedResponseTypes at the given index. + */ + public Remote.ReadRequest.ResponseType getAcceptedResponseTypes(int index) { + return acceptedResponseTypes_converter_.convert(acceptedResponseTypes_.get(index)); + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param index The index to set the value at. + * @param value The acceptedResponseTypes to set. + * @return This builder for chaining. + */ + public Builder setAcceptedResponseTypes( + int index, Remote.ReadRequest.ResponseType value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.set(index, value.getNumber()); + onChanged(); + return this; + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param value The acceptedResponseTypes to add. + * @return This builder for chaining. + */ + public Builder addAcceptedResponseTypes(Remote.ReadRequest.ResponseType value) { + if (value == null) { + throw new NullPointerException(); + } + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.add(value.getNumber()); + onChanged(); + return this; + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param values The acceptedResponseTypes to add. + * @return This builder for chaining. + */ + public Builder addAllAcceptedResponseTypes( + Iterable values) { + ensureAcceptedResponseTypesIsMutable(); + for (Remote.ReadRequest.ResponseType value : values) { + acceptedResponseTypes_.add(value.getNumber()); + } + onChanged(); + return this; + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @return This builder for chaining. + */ + public Builder clearAcceptedResponseTypes() { + acceptedResponseTypes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
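+             *
+             * <p>Hand-written note (illustrative; {@code builder} is a
+             * Remote.ReadRequest.Builder from context): the raw wire numbers
+             * are kept as-is, so enum values unknown to this runtime survive:
+             * <pre>{@code
+             * java.util.List<Integer> raw = builder.getAcceptedResponseTypesValueList();
+             * }</pre>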
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @return A list containing the enum numeric values on the wire for + * acceptedResponseTypes. + */ + public java.util.List getAcceptedResponseTypesValueList() { + return java.util.Collections.unmodifiableList(acceptedResponseTypes_); + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param index The index of the value to return. + * @return The enum numeric value on the wire of acceptedResponseTypes at the given + * index. + */ + public int getAcceptedResponseTypesValue(int index) { + return acceptedResponseTypes_.get(index); + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param index The index to set the value at. + * @param value The enum numeric value on the wire for acceptedResponseTypes to set. + * @return This builder for chaining. + */ + public Builder setAcceptedResponseTypesValue(int index, int value) { + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.set(index, value); + onChanged(); + return this; + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param value The enum numeric value on the wire for acceptedResponseTypes to add. + * @return This builder for chaining. + */ + public Builder addAcceptedResponseTypesValue(int value) { + ensureAcceptedResponseTypesIsMutable(); + acceptedResponseTypes_.add(value); + onChanged(); + return this; + } + + /** + * + * + *
+             * accepted_response_types allows negotiating the content type of the response.
+             *
+             * Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+             * implemented by server, error is returned.
+             * For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+             * </pre>
+ * + * repeated .prometheus.ReadRequest.ResponseType accepted_response_types = 2; + * + * + * @param values The enum numeric values on the wire for acceptedResponseTypes to add. + * @return This builder for chaining. + */ + public Builder addAllAcceptedResponseTypesValue(Iterable values) { + ensureAcceptedResponseTypesIsMutable(); + for (int value : values) { + acceptedResponseTypes_.add(value); + } + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.ReadRequest) + } + + // @@protoc_insertion_point(class_scope:prometheus.ReadRequest) + private static final Remote.ReadRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Remote.ReadRequest(); + } + + public static Remote.ReadRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ReadRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Remote.ReadRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ReadResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.ReadResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        java.util.List<Remote.QueryResult> getResultsList();
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        Remote.QueryResult getResults(int index);
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        int getResultsCount();
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        java.util.List<? extends Remote.QueryResultOrBuilder> getResultsOrBuilderList();
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        Remote.QueryResultOrBuilder getResultsOrBuilder(int index);
+    }
+
+    /**
+     *
+     *
+     * <pre>
+     * ReadResponse is a response when response_type equals SAMPLES.
+     * </pre>
+     *
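+     * <p>Hand-written sketch (illustrative only; {@code request} and
+     * {@code bytes} come from surrounding context): results line up with the
+     * request's queries by index:
+     * <pre>{@code
+     * Remote.ReadResponse response = Remote.ReadResponse.parseFrom(bytes);
+     * for (int i = 0; i < response.getResultsCount(); i++) {
+     *     Remote.Query query = request.getQueries(i);         // i-th query
+     *     Remote.QueryResult result = response.getResults(i); // its result
+     * }
+     * }</pre>
+     *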
Protobuf type {@code prometheus.ReadResponse} + */ + public static final class ReadResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.ReadResponse) + ReadResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ReadResponse.newBuilder() to construct. + private ReadResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadResponse() { + results_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ReadResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_ReadResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_ReadResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.ReadResponse.class, Remote.ReadResponse.Builder.class); + } + + public static final int RESULTS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List results_; + + /** + * + * + *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        @Override
+        public java.util.List<Remote.QueryResult> getResultsList() {
+            return results_;
+        }
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        @Override
+        public java.util.List<? extends Remote.QueryResultOrBuilder>
+                getResultsOrBuilderList() {
+            return results_;
+        }
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        @Override
+        public int getResultsCount() {
+            return results_.size();
+        }
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+         *
+         * <code>repeated .prometheus.QueryResult results = 1;</code>
+         */
+        @Override
+        public Remote.QueryResult getResults(int index) {
+            return results_.get(index);
+        }
+
+        /**
+         *
+         *
+         * <pre>
+         * In same order as the request's queries.
+         * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + @Override + public Remote.QueryResultOrBuilder getResultsOrBuilder(int index) { + return results_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < results_.size(); i++) { + output.writeMessage(1, results_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < results_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, results_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Remote.ReadResponse)) { + return super.equals(obj); + } + Remote.ReadResponse other = (Remote.ReadResponse) obj; + + if (!getResultsList().equals(other.getResultsList())) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getResultsCount() > 0) { + hash = (37 * hash) + RESULTS_FIELD_NUMBER; + hash = (53 * hash) + getResultsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Remote.ReadResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ReadResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ReadResponse parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ReadResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ReadResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ReadResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ReadResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.ReadResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.ReadResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Remote.ReadResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.ReadResponse parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.ReadResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Remote.ReadResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * ReadResponse is a response when response_type equals SAMPLES.
+         * </pre>
+         *
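+         * <p>Hand-written sketch (illustrative only) of assembling a response
+         * through this builder:
+         * <pre>{@code
+         * Remote.ReadResponse response = Remote.ReadResponse.newBuilder()
+         *         .addResults(Remote.QueryResult.newBuilder()) // one result per query
+         *         .build();
+         * }</pre>
+         *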
Protobuf type {@code prometheus.ReadResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.ReadResponse) + Remote.ReadResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_ReadResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_ReadResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.ReadResponse.class, Remote.ReadResponse.Builder.class); + } + + // Construct using Remote.ReadResponse.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (resultsBuilder_ == null) { + results_ = java.util.Collections.emptyList(); + } else { + results_ = null; + resultsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Remote.internal_static_prometheus_ReadResponse_descriptor; + } + + @Override + public Remote.ReadResponse getDefaultInstanceForType() { + return Remote.ReadResponse.getDefaultInstance(); + } + + @Override + public Remote.ReadResponse build() { + Remote.ReadResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Remote.ReadResponse buildPartial() { + Remote.ReadResponse result = new Remote.ReadResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Remote.ReadResponse result) { + if (resultsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + results_ = java.util.Collections.unmodifiableList(results_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.results_ = results_; + } else { + result.results_ = resultsBuilder_.build(); + } + } + + private void buildPartial0(Remote.ReadResponse result) { + int from_bitField0_ = bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Remote.ReadResponse) { + return mergeFrom((Remote.ReadResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Remote.ReadResponse other) { + if (other == Remote.ReadResponse.getDefaultInstance()) { + return this; + } + if 
(resultsBuilder_ == null) { + if (!other.results_.isEmpty()) { + if (results_.isEmpty()) { + results_ = other.results_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureResultsIsMutable(); + results_.addAll(other.results_); + } + onChanged(); + } + } else { + if (!other.results_.isEmpty()) { + if (resultsBuilder_.isEmpty()) { + resultsBuilder_.dispose(); + resultsBuilder_ = null; + results_ = other.results_; + bitField0_ = (bitField0_ & ~0x00000001); + resultsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getResultsFieldBuilder() + : null; + } else { + resultsBuilder_.addAllMessages(other.results_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Remote.QueryResult m = + input.readMessage( + Remote.QueryResult.parser(), extensionRegistry); + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(m); + } else { + resultsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List results_ = java.util.Collections.emptyList(); + + private void ensureResultsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + results_ = new java.util.ArrayList(results_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Remote.QueryResult, + Remote.QueryResult.Builder, + Remote.QueryResultOrBuilder> + resultsBuilder_; + + /** + * + * + *
+             * <pre>
+             * In same order as the request's queries.
+             * </pre>
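+             *
+             * <p>Hand-written note (illustrative): the returned list is a
+             * read-only view; mutate through the builder instead, e.g.:
+             * <pre>{@code
+             * builder.addResults(Remote.QueryResult.getDefaultInstance());
+             * }</pre>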
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public java.util.List getResultsList() { + if (resultsBuilder_ == null) { + return java.util.Collections.unmodifiableList(results_); + } else { + return resultsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public int getResultsCount() { + if (resultsBuilder_ == null) { + return results_.size(); + } else { + return resultsBuilder_.getCount(); + } + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Remote.QueryResult getResults(int index) { + if (resultsBuilder_ == null) { + return results_.get(index); + } else { + return resultsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder setResults(int index, Remote.QueryResult value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.set(index, value); + onChanged(); + } else { + resultsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder setResults(int index, Remote.QueryResult.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.set(index, builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder addResults(Remote.QueryResult value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.add(value); + onChanged(); + } else { + resultsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder addResults(int index, Remote.QueryResult value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.add(index, value); + onChanged(); + } else { + resultsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder addResults(Remote.QueryResult.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder addResults(int index, Remote.QueryResult.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(index, builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder addAllResults(Iterable values) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, results_); + onChanged(); + } else { + resultsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder clearResults() { + if (resultsBuilder_ == null) { + results_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + resultsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Builder removeResults(int index) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.remove(index); + onChanged(); + } else { + resultsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Remote.QueryResult.Builder getResultsBuilder(int index) { + return getResultsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Remote.QueryResultOrBuilder getResultsOrBuilder(int index) { + if (resultsBuilder_ == null) { + return results_.get(index); + } else { + return resultsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public java.util.List getResultsOrBuilderList() { + if (resultsBuilder_ != null) { + return resultsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(results_); + } + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Remote.QueryResult.Builder addResultsBuilder() { + return getResultsFieldBuilder().addBuilder(Remote.QueryResult.getDefaultInstance()); + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public Remote.QueryResult.Builder addResultsBuilder(int index) { + return getResultsFieldBuilder() + .addBuilder(index, Remote.QueryResult.getDefaultInstance()); + } + + /** + * + * + *
+             * In same order as the request's queries.
+             * </pre>
+ * + * repeated .prometheus.QueryResult results = 1; + */ + public java.util.List getResultsBuilderList() { + return getResultsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Remote.QueryResult, + Remote.QueryResult.Builder, + Remote.QueryResultOrBuilder> + getResultsFieldBuilder() { + if (resultsBuilder_ == null) { + resultsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Remote.QueryResult, + Remote.QueryResult.Builder, + Remote.QueryResultOrBuilder>( + results_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + results_ = null; + } + return resultsBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.ReadResponse) + } + + // @@protoc_insertion_point(class_scope:prometheus.ReadResponse) + private static final Remote.ReadResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Remote.ReadResponse(); + } + + public static Remote.ReadResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ReadResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Remote.ReadResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface QueryOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.Query) + com.google.protobuf.MessageOrBuilder { + + /** + * int64 start_timestamp_ms = 1; + * + * @return The startTimestampMs. + */ + long getStartTimestampMs(); + + /** + * int64 end_timestamp_ms = 2; + * + * @return The endTimestampMs. + */ + long getEndTimestampMs(); + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + java.util.List getMatchersList(); + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + Types.LabelMatcher getMatchers(int index); + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + int getMatchersCount(); + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + java.util.List getMatchersOrBuilderList(); + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + Types.LabelMatcherOrBuilder getMatchersOrBuilder(int index); + + /** + * .prometheus.ReadHints hints = 4; + * + * @return Whether the hints field is set. 
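+         * <p>Hand-written sketch (illustrative only; the Types.LabelMatcher
+         * accessors are assumed from the usual codegen): a Query pairs a time
+         * window with label matchers, and hints must be guarded by hasHints():
+         * <pre>{@code
+         * Remote.Query query = Remote.Query.newBuilder()
+         *         .setStartTimestampMs(1700000000000L)
+         *         .setEndTimestampMs(1700000360000L)
+         *         .addMatchers(Types.LabelMatcher.newBuilder()
+         *                 .setName("__name__")
+         *                 .setValue("up"))
+         *         .build();
+         * if (query.hasHints()) {
+         *     Types.ReadHints hints = query.getHints(); // only meaningful here
+         * }
+         * }</pre>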
+ */ + boolean hasHints(); + + /** + * .prometheus.ReadHints hints = 4; + * + * @return The hints. + */ + Types.ReadHints getHints(); + + /** .prometheus.ReadHints hints = 4; */ + Types.ReadHintsOrBuilder getHintsOrBuilder(); + } + + /** Protobuf type {@code prometheus.Query} */ + public static final class Query extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.Query) + QueryOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Query.newBuilder() to construct. + private Query(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Query() { + matchers_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new Query(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_Query_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_Query_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.Query.class, Remote.Query.Builder.class); + } + + private int bitField0_; + public static final int START_TIMESTAMP_MS_FIELD_NUMBER = 1; + private long startTimestampMs_ = 0L; + + /** + * int64 start_timestamp_ms = 1; + * + * @return The startTimestampMs. + */ + @Override + public long getStartTimestampMs() { + return startTimestampMs_; + } + + public static final int END_TIMESTAMP_MS_FIELD_NUMBER = 2; + private long endTimestampMs_ = 0L; + + /** + * int64 end_timestamp_ms = 2; + * + * @return The endTimestampMs. + */ + @Override + public long getEndTimestampMs() { + return endTimestampMs_; + } + + public static final int MATCHERS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List matchers_; + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + @Override + public java.util.List getMatchersList() { + return matchers_; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + @Override + public java.util.List getMatchersOrBuilderList() { + return matchers_; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + @Override + public int getMatchersCount() { + return matchers_.size(); + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + @Override + public Types.LabelMatcher getMatchers(int index) { + return matchers_.get(index); + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + @Override + public Types.LabelMatcherOrBuilder getMatchersOrBuilder(int index) { + return matchers_.get(index); + } + + public static final int HINTS_FIELD_NUMBER = 4; + private Types.ReadHints hints_; + + /** + * .prometheus.ReadHints hints = 4; + * + * @return Whether the hints field is set. + */ + @Override + public boolean hasHints() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * .prometheus.ReadHints hints = 4; + * + * @return The hints. + */ + @Override + public Types.ReadHints getHints() { + return hints_ == null ? Types.ReadHints.getDefaultInstance() : hints_; + } + + /** .prometheus.ReadHints hints = 4; */ + @Override + public Types.ReadHintsOrBuilder getHintsOrBuilder() { + return hints_ == null ? 
Types.ReadHints.getDefaultInstance() : hints_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (startTimestampMs_ != 0L) { + output.writeInt64(1, startTimestampMs_); + } + if (endTimestampMs_ != 0L) { + output.writeInt64(2, endTimestampMs_); + } + for (int i = 0; i < matchers_.size(); i++) { + output.writeMessage(3, matchers_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getHints()); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + if (startTimestampMs_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 1, startTimestampMs_); + } + if (endTimestampMs_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, endTimestampMs_); + } + for (int i = 0; i < matchers_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, matchers_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getHints()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Remote.Query)) { + return super.equals(obj); + } + Remote.Query other = (Remote.Query) obj; + + if (getStartTimestampMs() != other.getStartTimestampMs()) { + return false; + } + if (getEndTimestampMs() != other.getEndTimestampMs()) { + return false; + } + if (!getMatchersList().equals(other.getMatchersList())) { + return false; + } + if (hasHints() != other.hasHints()) { + return false; + } + if (hasHints()) { + if (!getHints().equals(other.getHints())) { + return false; + } + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + START_TIMESTAMP_MS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getStartTimestampMs()); + hash = (37 * hash) + END_TIMESTAMP_MS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEndTimestampMs()); + if (getMatchersCount() > 0) { + hash = (37 * hash) + MATCHERS_FIELD_NUMBER; + hash = (53 * hash) + getMatchersList().hashCode(); + } + if (hasHints()) { + hash = (37 * hash) + HINTS_FIELD_NUMBER; + hash = (53 * hash) + getHints().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Remote.Query parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.Query parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.Query 
parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.Query parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.Query parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.Query parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.Query parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.Query parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.Query parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Remote.Query parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.Query parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.Query parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Remote.Query prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.Query} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.Query) + Remote.QueryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_Query_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_Query_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.Query.class, Remote.Query.Builder.class); + } + + // Construct using Remote.Query.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getMatchersFieldBuilder(); + getHintsFieldBuilder(); + } + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + startTimestampMs_ = 0L; + endTimestampMs_ = 0L; + if (matchersBuilder_ == null) { + matchers_ = java.util.Collections.emptyList(); + } else { + matchers_ = null; + matchersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + hints_ = null; + if (hintsBuilder_ != null) { + hintsBuilder_.dispose(); + hintsBuilder_ = null; + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Remote.internal_static_prometheus_Query_descriptor; + } + + @Override + public Remote.Query getDefaultInstanceForType() { + return Remote.Query.getDefaultInstance(); + } + + @Override + public Remote.Query build() { + Remote.Query result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Remote.Query buildPartial() { + Remote.Query result = new Remote.Query(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Remote.Query result) { + if (matchersBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + matchers_ = java.util.Collections.unmodifiableList(matchers_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.matchers_ = matchers_; + } else { + result.matchers_ = matchersBuilder_.build(); + } + } + + private void buildPartial0(Remote.Query result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.startTimestampMs_ = startTimestampMs_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.endTimestampMs_ = endTimestampMs_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.hints_ = hintsBuilder_ == null ? 
hints_ : hintsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Remote.Query) { + return mergeFrom((Remote.Query) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Remote.Query other) { + if (other == Remote.Query.getDefaultInstance()) { + return this; + } + if (other.getStartTimestampMs() != 0L) { + setStartTimestampMs(other.getStartTimestampMs()); + } + if (other.getEndTimestampMs() != 0L) { + setEndTimestampMs(other.getEndTimestampMs()); + } + if (matchersBuilder_ == null) { + if (!other.matchers_.isEmpty()) { + if (matchers_.isEmpty()) { + matchers_ = other.matchers_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureMatchersIsMutable(); + matchers_.addAll(other.matchers_); + } + onChanged(); + } + } else { + if (!other.matchers_.isEmpty()) { + if (matchersBuilder_.isEmpty()) { + matchersBuilder_.dispose(); + matchersBuilder_ = null; + matchers_ = other.matchers_; + bitField0_ = (bitField0_ & ~0x00000004); + matchersBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getMatchersFieldBuilder() + : null; + } else { + matchersBuilder_.addAllMessages(other.matchers_); + } + } + } + if (other.hasHints()) { + mergeHints(other.getHints()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + startTimestampMs_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + endTimestampMs_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + Types.LabelMatcher m = + input.readMessage( + Types.LabelMatcher.parser(), extensionRegistry); + if (matchersBuilder_ == null) { + ensureMatchersIsMutable(); + matchers_.add(m); + } else { + matchersBuilder_.addMessage(m); + } + break; + } // case 26 + case 34: + { + input.readMessage( + getHintsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long startTimestampMs_; + + /** + * int64 start_timestamp_ms = 1; + * + * @return The startTimestampMs. + */ + @Override + public long getStartTimestampMs() { + return startTimestampMs_; + } + + /** + * int64 start_timestamp_ms = 1; + * + * @param value The startTimestampMs to set. + * @return This builder for chaining. + */ + public Builder setStartTimestampMs(long value) { + + startTimestampMs_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * int64 start_timestamp_ms = 1; + * + * @return This builder for chaining. + */ + public Builder clearStartTimestampMs() { + bitField0_ = (bitField0_ & ~0x00000001); + startTimestampMs_ = 0L; + onChanged(); + return this; + } + + private long endTimestampMs_; + + /** + * int64 end_timestamp_ms = 2; + * + * @return The endTimestampMs. + */ + @Override + public long getEndTimestampMs() { + return endTimestampMs_; + } + + /** + * int64 end_timestamp_ms = 2; + * + * @param value The endTimestampMs to set. + * @return This builder for chaining. + */ + public Builder setEndTimestampMs(long value) { + + endTimestampMs_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * int64 end_timestamp_ms = 2; + * + * @return This builder for chaining. 
+ */ + public Builder clearEndTimestampMs() { + bitField0_ = (bitField0_ & ~0x00000002); + endTimestampMs_ = 0L; + onChanged(); + return this; + } + + private java.util.List matchers_ = + java.util.Collections.emptyList(); + + private void ensureMatchersIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + matchers_ = new java.util.ArrayList(matchers_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.LabelMatcher, + Types.LabelMatcher.Builder, + Types.LabelMatcherOrBuilder> + matchersBuilder_; + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public java.util.List getMatchersList() { + if (matchersBuilder_ == null) { + return java.util.Collections.unmodifiableList(matchers_); + } else { + return matchersBuilder_.getMessageList(); + } + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public int getMatchersCount() { + if (matchersBuilder_ == null) { + return matchers_.size(); + } else { + return matchersBuilder_.getCount(); + } + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Types.LabelMatcher getMatchers(int index) { + if (matchersBuilder_ == null) { + return matchers_.get(index); + } else { + return matchersBuilder_.getMessage(index); + } + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder setMatchers(int index, Types.LabelMatcher value) { + if (matchersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchersIsMutable(); + matchers_.set(index, value); + onChanged(); + } else { + matchersBuilder_.setMessage(index, value); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder setMatchers(int index, Types.LabelMatcher.Builder builderForValue) { + if (matchersBuilder_ == null) { + ensureMatchersIsMutable(); + matchers_.set(index, builderForValue.build()); + onChanged(); + } else { + matchersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder addMatchers(Types.LabelMatcher value) { + if (matchersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchersIsMutable(); + matchers_.add(value); + onChanged(); + } else { + matchersBuilder_.addMessage(value); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder addMatchers(int index, Types.LabelMatcher value) { + if (matchersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMatchersIsMutable(); + matchers_.add(index, value); + onChanged(); + } else { + matchersBuilder_.addMessage(index, value); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder addMatchers(Types.LabelMatcher.Builder builderForValue) { + if (matchersBuilder_ == null) { + ensureMatchersIsMutable(); + matchers_.add(builderForValue.build()); + onChanged(); + } else { + matchersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder addMatchers(int index, Types.LabelMatcher.Builder builderForValue) { + if (matchersBuilder_ == null) { + ensureMatchersIsMutable(); + matchers_.add(index, builderForValue.build()); + onChanged(); + } else { + matchersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder 
addAllMatchers(Iterable values) { + if (matchersBuilder_ == null) { + ensureMatchersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, matchers_); + onChanged(); + } else { + matchersBuilder_.addAllMessages(values); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder clearMatchers() { + if (matchersBuilder_ == null) { + matchers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + matchersBuilder_.clear(); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Builder removeMatchers(int index) { + if (matchersBuilder_ == null) { + ensureMatchersIsMutable(); + matchers_.remove(index); + onChanged(); + } else { + matchersBuilder_.remove(index); + } + return this; + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Types.LabelMatcher.Builder getMatchersBuilder(int index) { + return getMatchersFieldBuilder().getBuilder(index); + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Types.LabelMatcherOrBuilder getMatchersOrBuilder(int index) { + if (matchersBuilder_ == null) { + return matchers_.get(index); + } else { + return matchersBuilder_.getMessageOrBuilder(index); + } + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public java.util.List + getMatchersOrBuilderList() { + if (matchersBuilder_ != null) { + return matchersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(matchers_); + } + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Types.LabelMatcher.Builder addMatchersBuilder() { + return getMatchersFieldBuilder() + .addBuilder(Types.LabelMatcher.getDefaultInstance()); + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public Types.LabelMatcher.Builder addMatchersBuilder(int index) { + return getMatchersFieldBuilder() + .addBuilder(index, Types.LabelMatcher.getDefaultInstance()); + } + + /** repeated .prometheus.LabelMatcher matchers = 3; */ + public java.util.List getMatchersBuilderList() { + return getMatchersFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.LabelMatcher, + Types.LabelMatcher.Builder, + Types.LabelMatcherOrBuilder> + getMatchersFieldBuilder() { + if (matchersBuilder_ == null) { + matchersBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.LabelMatcher, + Types.LabelMatcher.Builder, + Types.LabelMatcherOrBuilder>( + matchers_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + matchers_ = null; + } + return matchersBuilder_; + } + + private Types.ReadHints hints_; + private com.google.protobuf.SingleFieldBuilderV3< + Types.ReadHints, Types.ReadHints.Builder, Types.ReadHintsOrBuilder> + hintsBuilder_; + + /** + * .prometheus.ReadHints hints = 4; + * + * @return Whether the hints field is set. + */ + public boolean hasHints() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * .prometheus.ReadHints hints = 4; + * + * @return The hints. + */ + public Types.ReadHints getHints() { + if (hintsBuilder_ == null) { + return hints_ == null ? 
Types.ReadHints.getDefaultInstance() : hints_; + } else { + return hintsBuilder_.getMessage(); + } + } + + /** .prometheus.ReadHints hints = 4; */ + public Builder setHints(Types.ReadHints value) { + if (hintsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hints_ = value; + } else { + hintsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** .prometheus.ReadHints hints = 4; */ + public Builder setHints(Types.ReadHints.Builder builderForValue) { + if (hintsBuilder_ == null) { + hints_ = builderForValue.build(); + } else { + hintsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** .prometheus.ReadHints hints = 4; */ + public Builder mergeHints(Types.ReadHints value) { + if (hintsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && hints_ != null + && hints_ != Types.ReadHints.getDefaultInstance()) { + getHintsBuilder().mergeFrom(value); + } else { + hints_ = value; + } + } else { + hintsBuilder_.mergeFrom(value); + } + if (hints_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** .prometheus.ReadHints hints = 4; */ + public Builder clearHints() { + bitField0_ = (bitField0_ & ~0x00000008); + hints_ = null; + if (hintsBuilder_ != null) { + hintsBuilder_.dispose(); + hintsBuilder_ = null; + } + onChanged(); + return this; + } + + /** .prometheus.ReadHints hints = 4; */ + public Types.ReadHints.Builder getHintsBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getHintsFieldBuilder().getBuilder(); + } + + /** .prometheus.ReadHints hints = 4; */ + public Types.ReadHintsOrBuilder getHintsOrBuilder() { + if (hintsBuilder_ != null) { + return hintsBuilder_.getMessageOrBuilder(); + } else { + return hints_ == null ? 
Types.ReadHints.getDefaultInstance() : hints_; + } + } + + /** .prometheus.ReadHints hints = 4; */ + private com.google.protobuf.SingleFieldBuilderV3< + Types.ReadHints, Types.ReadHints.Builder, Types.ReadHintsOrBuilder> + getHintsFieldBuilder() { + if (hintsBuilder_ == null) { + hintsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + Types.ReadHints, + Types.ReadHints.Builder, + Types.ReadHintsOrBuilder>( + getHints(), getParentForChildren(), isClean()); + hints_ = null; + } + return hintsBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.Query) + } + + // @@protoc_insertion_point(class_scope:prometheus.Query) + private static final Remote.Query DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Remote.Query(); + } + + public static Remote.Query getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public Query parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Remote.Query getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface QueryResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.QueryResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
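+         * <p>Editor's note: a minimal, illustrative sketch of a consumer relying on this
+         * ordering guarantee; {@code result} stands for an already-parsed
+         * Remote.QueryResult, and the Types.TimeSeries/Types.Sample accessors are
+         * assumed from the companion generated Types class:
+         * <pre>{@code
+         * for (Types.TimeSeries ts : result.getTimeseriesList()) {
+         *     long prev = Long.MIN_VALUE;
+         *     for (Types.Sample s : ts.getSamplesList()) {
+         *         // samples are ordered by time, so timestamps never move backwards
+         *         assert s.getTimestamp() >= prev;
+         *         prev = s.getTimestamp();
+         *     }
+         * }
+         * }</pre>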
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + java.util.List getTimeseriesList(); + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + Types.TimeSeries getTimeseries(int index); + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + int getTimeseriesCount(); + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + java.util.List getTimeseriesOrBuilderList(); + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + Types.TimeSeriesOrBuilder getTimeseriesOrBuilder(int index); + } + + /** Protobuf type {@code prometheus.QueryResult} */ + public static final class QueryResult extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.QueryResult) + QueryResultOrBuilder { + private static final long serialVersionUID = 0L; + + // Use QueryResult.newBuilder() to construct. + private QueryResult(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private QueryResult() { + timeseries_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new QueryResult(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_QueryResult_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_QueryResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.QueryResult.class, Remote.QueryResult.Builder.class); + } + + public static final int TIMESERIES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List timeseries_; + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + @Override + public java.util.List getTimeseriesList() { + return timeseries_; + } + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + @Override + public java.util.List getTimeseriesOrBuilderList() { + return timeseries_; + } + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + @Override + public int getTimeseriesCount() { + return timeseries_.size(); + } + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + @Override + public Types.TimeSeries getTimeseries(int index) { + return timeseries_.get(index); + } + + /** + * + * + *
+         * Samples within a time series must be ordered by time.
+         *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + @Override + public Types.TimeSeriesOrBuilder getTimeseriesOrBuilder(int index) { + return timeseries_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < timeseries_.size(); i++) { + output.writeMessage(1, timeseries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < timeseries_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, timeseries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Remote.QueryResult)) { + return super.equals(obj); + } + Remote.QueryResult other = (Remote.QueryResult) obj; + + if (!getTimeseriesList().equals(other.getTimeseriesList())) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getTimeseriesCount() > 0) { + hash = (37 * hash) + TIMESERIES_FIELD_NUMBER; + hash = (53 * hash) + getTimeseriesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Remote.QueryResult parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.QueryResult parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.QueryResult parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.QueryResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.QueryResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.QueryResult parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.QueryResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.QueryResult parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.QueryResult parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Remote.QueryResult parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.QueryResult parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.QueryResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Remote.QueryResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.QueryResult} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.QueryResult) + Remote.QueryResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_QueryResult_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_QueryResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.QueryResult.class, Remote.QueryResult.Builder.class); + } + + // Construct using Remote.QueryResult.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (timeseriesBuilder_ == null) { + timeseries_ = java.util.Collections.emptyList(); + } else { + timeseries_ = null; + timeseriesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Remote.internal_static_prometheus_QueryResult_descriptor; + } + + @Override + public Remote.QueryResult getDefaultInstanceForType() { + return Remote.QueryResult.getDefaultInstance(); + } + + @Override + public Remote.QueryResult build() { + Remote.QueryResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Remote.QueryResult buildPartial() { + Remote.QueryResult result = new Remote.QueryResult(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + 
onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Remote.QueryResult result) { + if (timeseriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + timeseries_ = java.util.Collections.unmodifiableList(timeseries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.timeseries_ = timeseries_; + } else { + result.timeseries_ = timeseriesBuilder_.build(); + } + } + + private void buildPartial0(Remote.QueryResult result) { + int from_bitField0_ = bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Remote.QueryResult) { + return mergeFrom((Remote.QueryResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Remote.QueryResult other) { + if (other == Remote.QueryResult.getDefaultInstance()) { + return this; + } + if (timeseriesBuilder_ == null) { + if (!other.timeseries_.isEmpty()) { + if (timeseries_.isEmpty()) { + timeseries_ = other.timeseries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTimeseriesIsMutable(); + timeseries_.addAll(other.timeseries_); + } + onChanged(); + } + } else { + if (!other.timeseries_.isEmpty()) { + if (timeseriesBuilder_.isEmpty()) { + timeseriesBuilder_.dispose(); + timeseriesBuilder_ = null; + timeseries_ = other.timeseries_; + bitField0_ = (bitField0_ & ~0x00000001); + timeseriesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getTimeseriesFieldBuilder() + : null; + } else { + timeseriesBuilder_.addAllMessages(other.timeseries_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Types.TimeSeries m = + input.readMessage( + Types.TimeSeries.parser(), extensionRegistry); + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.add(m); + } else { + timeseriesBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List timeseries_ = + java.util.Collections.emptyList(); + + private void ensureTimeseriesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + timeseries_ = new java.util.ArrayList(timeseries_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.TimeSeries, Types.TimeSeries.Builder, Types.TimeSeriesOrBuilder> + timeseriesBuilder_; + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public java.util.List getTimeseriesList() { + if (timeseriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(timeseries_); + } else { + return timeseriesBuilder_.getMessageList(); + } + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public int getTimeseriesCount() { + if (timeseriesBuilder_ == null) { + return timeseries_.size(); + } else { + return timeseriesBuilder_.getCount(); + } + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Types.TimeSeries getTimeseries(int index) { + if (timeseriesBuilder_ == null) { + return timeseries_.get(index); + } else { + return timeseriesBuilder_.getMessage(index); + } + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder setTimeseries(int index, Types.TimeSeries value) { + if (timeseriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTimeseriesIsMutable(); + timeseries_.set(index, value); + onChanged(); + } else { + timeseriesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder setTimeseries(int index, Types.TimeSeries.Builder builderForValue) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.set(index, builderForValue.build()); + onChanged(); + } else { + timeseriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder addTimeseries(Types.TimeSeries value) { + if (timeseriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTimeseriesIsMutable(); + timeseries_.add(value); + onChanged(); + } else { + timeseriesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder addTimeseries(int index, Types.TimeSeries value) { + if (timeseriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTimeseriesIsMutable(); + timeseries_.add(index, value); + onChanged(); + } else { + timeseriesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder addTimeseries(Types.TimeSeries.Builder builderForValue) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.add(builderForValue.build()); + onChanged(); + } else { + timeseriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder addTimeseries(int index, Types.TimeSeries.Builder builderForValue) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.add(index, builderForValue.build()); + onChanged(); + } else { + timeseriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder addAllTimeseries(Iterable values) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, timeseries_); + onChanged(); + } else { + timeseriesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder clearTimeseries() { + if (timeseriesBuilder_ == null) { + timeseries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + timeseriesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Builder removeTimeseries(int index) { + if (timeseriesBuilder_ == null) { + ensureTimeseriesIsMutable(); + timeseries_.remove(index); + onChanged(); + } else { + timeseriesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Types.TimeSeries.Builder getTimeseriesBuilder(int index) { + return getTimeseriesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Types.TimeSeriesOrBuilder getTimeseriesOrBuilder(int index) { + if (timeseriesBuilder_ == null) { + return timeseries_.get(index); + } else { + return timeseriesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public java.util.List + getTimeseriesOrBuilderList() { + if (timeseriesBuilder_ != null) { + return timeseriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(timeseries_); + } + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Types.TimeSeries.Builder addTimeseriesBuilder() { + return getTimeseriesFieldBuilder() + .addBuilder(Types.TimeSeries.getDefaultInstance()); + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public Types.TimeSeries.Builder addTimeseriesBuilder(int index) { + return getTimeseriesFieldBuilder() + .addBuilder(index, Types.TimeSeries.getDefaultInstance()); + } + + /** + * + * + *
+             * Samples within a time series must be ordered by time.
+             *
+ * + * repeated .prometheus.TimeSeries timeseries = 1; + */ + public java.util.List getTimeseriesBuilderList() { + return getTimeseriesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.TimeSeries, Types.TimeSeries.Builder, Types.TimeSeriesOrBuilder> + getTimeseriesFieldBuilder() { + if (timeseriesBuilder_ == null) { + timeseriesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.TimeSeries, + Types.TimeSeries.Builder, + Types.TimeSeriesOrBuilder>( + timeseries_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + timeseries_ = null; + } + return timeseriesBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.QueryResult) + } + + // @@protoc_insertion_point(class_scope:prometheus.QueryResult) + private static final Remote.QueryResult DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Remote.QueryResult(); + } + + public static Remote.QueryResult getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public QueryResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Remote.QueryResult getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ChunkedReadResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.ChunkedReadResponse) + com.google.protobuf.MessageOrBuilder { + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + java.util.List getChunkedSeriesList(); + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + Types.ChunkedSeries getChunkedSeries(int index); + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + int getChunkedSeriesCount(); + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + java.util.List getChunkedSeriesOrBuilderList(); + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + Types.ChunkedSeriesOrBuilder getChunkedSeriesOrBuilder(int index); + + /** + * + * + *
+         * query_index represents the index of the query in ReadRequest.queries that these chunks relate to.
+         *
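+         * <p>Editor's note: an illustrative sketch of resolving this index; {@code request}
+         * stands for the originating ReadRequest and {@code response} for a parsed
+         * ChunkedReadResponse; the getQueries accessor is assumed from the generated
+         * ReadRequest message:
+         * <pre>{@code
+         * Remote.Query matched = request.getQueries((int) response.getQueryIndex());
+         * // route response.getChunkedSeriesList() to the accumulator for 'matched'
+         * }</pre>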
+ * + * int64 query_index = 2; + * + * @return The queryIndex. + */ + long getQueryIndex(); + } + + /** + * + * + *
+     * ChunkedReadResponse is the response used when response_type equals STREAMED_XOR_CHUNKS.
+     * We strictly stream full series one after another, optionally split by time. This means that a single frame can
+     * contain a partition of a single series, but once a new series starts streaming, no more chunks will
+     * be sent for the previous one. Series are returned sorted in the same way TSDB blocks are sorted internally.
+     *
+ * + *
Protobuf type {@code prometheus.ChunkedReadResponse} + */ + public static final class ChunkedReadResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.ChunkedReadResponse) + ChunkedReadResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use ChunkedReadResponse.newBuilder() to construct. + private ChunkedReadResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ChunkedReadResponse() { + chunkedSeries_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ChunkedReadResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_ChunkedReadResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_ChunkedReadResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.ChunkedReadResponse.class, + Remote.ChunkedReadResponse.Builder.class); + } + + public static final int CHUNKED_SERIES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List chunkedSeries_; + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + @Override + public java.util.List getChunkedSeriesList() { + return chunkedSeries_; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + @Override + public java.util.List + getChunkedSeriesOrBuilderList() { + return chunkedSeries_; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + @Override + public int getChunkedSeriesCount() { + return chunkedSeries_.size(); + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + @Override + public Types.ChunkedSeries getChunkedSeries(int index) { + return chunkedSeries_.get(index); + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + @Override + public Types.ChunkedSeriesOrBuilder getChunkedSeriesOrBuilder(int index) { + return chunkedSeries_.get(index); + } + + public static final int QUERY_INDEX_FIELD_NUMBER = 2; + private long queryIndex_ = 0L; + + /** + * + * + *

+         * query_index represents the index of the query in ReadRequest.queries that these chunks relate to.
+         *
+ * + * int64 query_index = 2; + * + * @return The queryIndex. + */ + @Override + public long getQueryIndex() { + return queryIndex_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < chunkedSeries_.size(); i++) { + output.writeMessage(1, chunkedSeries_.get(i)); + } + if (queryIndex_ != 0L) { + output.writeInt64(2, queryIndex_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < chunkedSeries_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, chunkedSeries_.get(i)); + } + if (queryIndex_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, queryIndex_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Remote.ChunkedReadResponse)) { + return super.equals(obj); + } + Remote.ChunkedReadResponse other = (Remote.ChunkedReadResponse) obj; + + if (!getChunkedSeriesList().equals(other.getChunkedSeriesList())) { + return false; + } + if (getQueryIndex() != other.getQueryIndex()) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getChunkedSeriesCount() > 0) { + hash = (37 * hash) + CHUNKED_SERIES_FIELD_NUMBER; + hash = (53 * hash) + getChunkedSeriesList().hashCode(); + } + hash = (37 * hash) + QUERY_INDEX_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getQueryIndex()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Remote.ChunkedReadResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ChunkedReadResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ChunkedReadResponse parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ChunkedReadResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ChunkedReadResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Remote.ChunkedReadResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Remote.ChunkedReadResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.ChunkedReadResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.ChunkedReadResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Remote.ChunkedReadResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Remote.ChunkedReadResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Remote.ChunkedReadResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Remote.ChunkedReadResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * ChunkedReadResponse is the response used when response_type equals STREAMED_XOR_CHUNKS.
+         * We strictly stream full series one after another, optionally split by time. This means that a single frame can
+         * contain a partition of a single series, but once a new series starts streaming, no more chunks will
+         * be sent for the previous one. Series are returned sorted in the same way TSDB blocks are sorted internally.
+         *
+ * + *
Protobuf type {@code prometheus.ChunkedReadResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.ChunkedReadResponse) + Remote.ChunkedReadResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Remote.internal_static_prometheus_ChunkedReadResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Remote.internal_static_prometheus_ChunkedReadResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Remote.ChunkedReadResponse.class, + Remote.ChunkedReadResponse.Builder.class); + } + + // Construct using Remote.ChunkedReadResponse.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (chunkedSeriesBuilder_ == null) { + chunkedSeries_ = java.util.Collections.emptyList(); + } else { + chunkedSeries_ = null; + chunkedSeriesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + queryIndex_ = 0L; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Remote.internal_static_prometheus_ChunkedReadResponse_descriptor; + } + + @Override + public Remote.ChunkedReadResponse getDefaultInstanceForType() { + return Remote.ChunkedReadResponse.getDefaultInstance(); + } + + @Override + public Remote.ChunkedReadResponse build() { + Remote.ChunkedReadResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Remote.ChunkedReadResponse buildPartial() { + Remote.ChunkedReadResponse result = new Remote.ChunkedReadResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Remote.ChunkedReadResponse result) { + if (chunkedSeriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + chunkedSeries_ = java.util.Collections.unmodifiableList(chunkedSeries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.chunkedSeries_ = chunkedSeries_; + } else { + result.chunkedSeries_ = chunkedSeriesBuilder_.build(); + } + } + + private void buildPartial0(Remote.ChunkedReadResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.queryIndex_ = queryIndex_; + } + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if 
(other instanceof Remote.ChunkedReadResponse) { + return mergeFrom((Remote.ChunkedReadResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Remote.ChunkedReadResponse other) { + if (other == Remote.ChunkedReadResponse.getDefaultInstance()) { + return this; + } + if (chunkedSeriesBuilder_ == null) { + if (!other.chunkedSeries_.isEmpty()) { + if (chunkedSeries_.isEmpty()) { + chunkedSeries_ = other.chunkedSeries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureChunkedSeriesIsMutable(); + chunkedSeries_.addAll(other.chunkedSeries_); + } + onChanged(); + } + } else { + if (!other.chunkedSeries_.isEmpty()) { + if (chunkedSeriesBuilder_.isEmpty()) { + chunkedSeriesBuilder_.dispose(); + chunkedSeriesBuilder_ = null; + chunkedSeries_ = other.chunkedSeries_; + bitField0_ = (bitField0_ & ~0x00000001); + chunkedSeriesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getChunkedSeriesFieldBuilder() + : null; + } else { + chunkedSeriesBuilder_.addAllMessages(other.chunkedSeries_); + } + } + } + if (other.getQueryIndex() != 0L) { + setQueryIndex(other.getQueryIndex()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Types.ChunkedSeries m = + input.readMessage( + Types.ChunkedSeries.parser(), + extensionRegistry); + if (chunkedSeriesBuilder_ == null) { + ensureChunkedSeriesIsMutable(); + chunkedSeries_.add(m); + } else { + chunkedSeriesBuilder_.addMessage(m); + } + break; + } // case 10 + case 16: + { + queryIndex_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List chunkedSeries_ = + java.util.Collections.emptyList(); + + private void ensureChunkedSeriesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + chunkedSeries_ = new java.util.ArrayList(chunkedSeries_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.ChunkedSeries, + Types.ChunkedSeries.Builder, + Types.ChunkedSeriesOrBuilder> + chunkedSeriesBuilder_; + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public java.util.List getChunkedSeriesList() { + if (chunkedSeriesBuilder_ == null) { + return java.util.Collections.unmodifiableList(chunkedSeries_); + } else { + return chunkedSeriesBuilder_.getMessageList(); + } + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public int getChunkedSeriesCount() { + if (chunkedSeriesBuilder_ == null) { + return chunkedSeries_.size(); + } else { + return chunkedSeriesBuilder_.getCount(); + } + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Types.ChunkedSeries 
getChunkedSeries(int index) { + if (chunkedSeriesBuilder_ == null) { + return chunkedSeries_.get(index); + } else { + return chunkedSeriesBuilder_.getMessage(index); + } + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder setChunkedSeries(int index, Types.ChunkedSeries value) { + if (chunkedSeriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChunkedSeriesIsMutable(); + chunkedSeries_.set(index, value); + onChanged(); + } else { + chunkedSeriesBuilder_.setMessage(index, value); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder setChunkedSeries( + int index, Types.ChunkedSeries.Builder builderForValue) { + if (chunkedSeriesBuilder_ == null) { + ensureChunkedSeriesIsMutable(); + chunkedSeries_.set(index, builderForValue.build()); + onChanged(); + } else { + chunkedSeriesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder addChunkedSeries(Types.ChunkedSeries value) { + if (chunkedSeriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChunkedSeriesIsMutable(); + chunkedSeries_.add(value); + onChanged(); + } else { + chunkedSeriesBuilder_.addMessage(value); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder addChunkedSeries(int index, Types.ChunkedSeries value) { + if (chunkedSeriesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChunkedSeriesIsMutable(); + chunkedSeries_.add(index, value); + onChanged(); + } else { + chunkedSeriesBuilder_.addMessage(index, value); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder addChunkedSeries(Types.ChunkedSeries.Builder builderForValue) { + if (chunkedSeriesBuilder_ == null) { + ensureChunkedSeriesIsMutable(); + chunkedSeries_.add(builderForValue.build()); + onChanged(); + } else { + chunkedSeriesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder addChunkedSeries( + int index, Types.ChunkedSeries.Builder builderForValue) { + if (chunkedSeriesBuilder_ == null) { + ensureChunkedSeriesIsMutable(); + chunkedSeries_.add(index, builderForValue.build()); + onChanged(); + } else { + chunkedSeriesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder addAllChunkedSeries(Iterable values) { + if (chunkedSeriesBuilder_ == null) { + ensureChunkedSeriesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, chunkedSeries_); + onChanged(); + } else { + chunkedSeriesBuilder_.addAllMessages(values); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder clearChunkedSeries() { + if (chunkedSeriesBuilder_ == null) { + chunkedSeries_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + chunkedSeriesBuilder_.clear(); + } + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Builder removeChunkedSeries(int index) { + if (chunkedSeriesBuilder_ == null) { + ensureChunkedSeriesIsMutable(); + chunkedSeries_.remove(index); + onChanged(); + } else { + chunkedSeriesBuilder_.remove(index); + 
} + return this; + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Types.ChunkedSeries.Builder getChunkedSeriesBuilder(int index) { + return getChunkedSeriesFieldBuilder().getBuilder(index); + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Types.ChunkedSeriesOrBuilder getChunkedSeriesOrBuilder(int index) { + if (chunkedSeriesBuilder_ == null) { + return chunkedSeries_.get(index); + } else { + return chunkedSeriesBuilder_.getMessageOrBuilder(index); + } + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public java.util.List + getChunkedSeriesOrBuilderList() { + if (chunkedSeriesBuilder_ != null) { + return chunkedSeriesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(chunkedSeries_); + } + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Types.ChunkedSeries.Builder addChunkedSeriesBuilder() { + return getChunkedSeriesFieldBuilder() + .addBuilder(Types.ChunkedSeries.getDefaultInstance()); + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public Types.ChunkedSeries.Builder addChunkedSeriesBuilder(int index) { + return getChunkedSeriesFieldBuilder() + .addBuilder(index, Types.ChunkedSeries.getDefaultInstance()); + } + + /** repeated .prometheus.ChunkedSeries chunked_series = 1; */ + public java.util.List getChunkedSeriesBuilderList() { + return getChunkedSeriesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.ChunkedSeries, + Types.ChunkedSeries.Builder, + Types.ChunkedSeriesOrBuilder> + getChunkedSeriesFieldBuilder() { + if (chunkedSeriesBuilder_ == null) { + chunkedSeriesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.ChunkedSeries, + Types.ChunkedSeries.Builder, + Types.ChunkedSeriesOrBuilder>( + chunkedSeries_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + chunkedSeries_ = null; + } + return chunkedSeriesBuilder_; + } + + private long queryIndex_; + + /** + * + * + *
+             * <pre>
+             * query_index represents an index of the query from ReadRequest.queries these chunks relates to.
+             * </pre>
+ * + * int64 query_index = 2; + * + * @return The queryIndex. + */ + @Override + public long getQueryIndex() { + return queryIndex_; + } + + /** + * + * + *
+             * <pre>
+             * query_index represents an index of the query from ReadRequest.queries these chunks relates to.
+             * </pre>
+ * + * int64 query_index = 2; + * + * @param value The queryIndex to set. + * @return This builder for chaining. + */ + public Builder setQueryIndex(long value) { + + queryIndex_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+             * <pre>
+             * query_index represents an index of the query from ReadRequest.queries these chunks relates to.
+             * </pre>
+ * + * int64 query_index = 2; + * + * @return This builder for chaining. + */ + public Builder clearQueryIndex() { + bitField0_ = (bitField0_ & ~0x00000002); + queryIndex_ = 0L; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.ChunkedReadResponse) + } + + // @@protoc_insertion_point(class_scope:prometheus.ChunkedReadResponse) + private static final Remote.ChunkedReadResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Remote.ChunkedReadResponse(); + } + + public static Remote.ChunkedReadResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ChunkedReadResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Remote.ChunkedReadResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_prometheus_WriteRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_prometheus_WriteRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_prometheus_ReadRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_prometheus_ReadRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_prometheus_ReadResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_prometheus_ReadResponse_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_prometheus_Query_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_prometheus_Query_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_prometheus_QueryResult_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_prometheus_QueryResult_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_prometheus_ChunkedReadResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_prometheus_ChunkedReadResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + String[] descriptorData = { + "\n\014remote.proto\022\nprometheus\032\013types.proto\032" + + "\ngogo.proto\"z\n\014WriteRequest\0220\n\ntimeserie" + + "s\030\001 \003(\0132\026.prometheus.TimeSeriesB\004\310\336\037\000\0222\n" + + "\010metadata\030\003 \003(\0132\032.prometheus.MetricMetad" + + "ataB\004\310\336\037\000J\004\010\002\020\003\"\256\001\n\013ReadRequest\022\"\n\007queri" + + "es\030\001 \003(\0132\021.prometheus.Query\022E\n\027accepted_" + + "response_types\030\002 \003(\0162$.prometheus.ReadRe" + + "quest.ResponseType\"4\n\014ResponseType\022\013\n\007SA" + + "MPLES\020\000\022\027\n\023STREAMED_XOR_CHUNKS\020\001\"8\n\014Read" + + "Response\022(\n\007results\030\001 \003(\0132\027.prometheus.Q" + + "ueryResult\"\217\001\n\005Query\022\032\n\022start_timestamp_" + + "ms\030\001 \001(\003\022\030\n\020end_timestamp_ms\030\002 \001(\003\022*\n\010ma" + + "tchers\030\003 \003(\0132\030.prometheus.LabelMatcher\022$" + + "\n\005hints\030\004 \001(\0132\025.prometheus.ReadHints\"9\n\013" + + "QueryResult\022*\n\ntimeseries\030\001 \003(\0132\026.promet" + + "heus.TimeSeries\"]\n\023ChunkedReadResponse\0221" + + "\n\016chunked_series\030\001 \003(\0132\031.prometheus.Chun" + + "kedSeries\022\023\n\013query_index\030\002 \001(\003B\010Z\006prompb" + + "b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + Types.getDescriptor(), GoGoProtos.getDescriptor(), + }); + internal_static_prometheus_WriteRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_prometheus_WriteRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_prometheus_WriteRequest_descriptor, + new String[] { + "Timeseries", "Metadata", + }); + internal_static_prometheus_ReadRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_prometheus_ReadRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_prometheus_ReadRequest_descriptor, + new String[] { + "Queries", "AcceptedResponseTypes", + }); + internal_static_prometheus_ReadResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_prometheus_ReadResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_prometheus_ReadResponse_descriptor, + new String[] { + "Results", + }); + internal_static_prometheus_Query_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_prometheus_Query_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_prometheus_Query_descriptor, + new String[] { + "StartTimestampMs", "EndTimestampMs", "Matchers", "Hints", + }); + internal_static_prometheus_QueryResult_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_prometheus_QueryResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_prometheus_QueryResult_descriptor, + new 
String[] { + "Timeseries", + }); + internal_static_prometheus_ChunkedReadResponse_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_prometheus_ChunkedReadResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_prometheus_ChunkedReadResponse_descriptor, + new String[] { + "ChunkedSeries", "QueryIndex", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(GoGoProtos.nullable); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + Types.getDescriptor(); + GoGoProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/Types.java b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/Types.java new file mode 100644 index 00000000000..0dc5e20d3c4 --- /dev/null +++ b/seatunnel-connectors-v2/connector-prometheus/src/main/java/org/apache/seatunnel/connectors/seatunnel/prometheus/sink/proto/Types.java @@ -0,0 +1,17105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.seatunnel.connectors.seatunnel.prometheus.sink.proto; + +public final class Types { + private Types() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface MetricMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.MetricMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * <pre>
+         * Represents the metric type, these match the set from Prometheus.
+         * Refer to github.com/prometheus/common/model/metadata.go for details.
+         * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
+         * <pre>
+         * Represents the metric type, these match the set from Prometheus.
+         * Refer to github.com/prometheus/common/model/metadata.go for details.
+         * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return The type. + */ + Types.MetricMetadata.MetricType getType(); + + /** + * string metric_family_name = 2; + * + * @return The metricFamilyName. + */ + String getMetricFamilyName(); + + /** + * string metric_family_name = 2; + * + * @return The bytes for metricFamilyName. + */ + com.google.protobuf.ByteString getMetricFamilyNameBytes(); + + /** + * string help = 4; + * + * @return The help. + */ + String getHelp(); + + /** + * string help = 4; + * + * @return The bytes for help. + */ + com.google.protobuf.ByteString getHelpBytes(); + + /** + * string unit = 5; + * + * @return The unit. + */ + String getUnit(); + + /** + * string unit = 5; + * + * @return The bytes for unit. + */ + com.google.protobuf.ByteString getUnitBytes(); + } + + /** Protobuf type {@code prometheus.MetricMetadata} */ + public static final class MetricMetadata extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.MetricMetadata) + MetricMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + // Use MetricMetadata.newBuilder() to construct. + private MetricMetadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MetricMetadata() { + type_ = 0; + metricFamilyName_ = ""; + help_ = ""; + unit_ = ""; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new MetricMetadata(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_MetricMetadata_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_MetricMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.MetricMetadata.class, Types.MetricMetadata.Builder.class); + } + + /** Protobuf enum {@code prometheus.MetricMetadata.MetricType} */ + public enum MetricType implements com.google.protobuf.ProtocolMessageEnum { + /** UNKNOWN = 0; */ + UNKNOWN(0), + /** COUNTER = 1; */ + COUNTER(1), + /** GAUGE = 2; */ + GAUGE(2), + /** HISTOGRAM = 3; */ + HISTOGRAM(3), + /** GAUGEHISTOGRAM = 4; */ + GAUGEHISTOGRAM(4), + /** SUMMARY = 5; */ + SUMMARY(5), + /** INFO = 6; */ + INFO(6), + /** STATESET = 7; */ + STATESET(7), + UNRECOGNIZED(-1), + ; + + /** UNKNOWN = 0; */ + public static final int UNKNOWN_VALUE = 0; + /** COUNTER = 1; */ + public static final int COUNTER_VALUE = 1; + /** GAUGE = 2; */ + public static final int GAUGE_VALUE = 2; + /** HISTOGRAM = 3; */ + public static final int HISTOGRAM_VALUE = 3; + /** GAUGEHISTOGRAM = 4; */ + public static final int GAUGEHISTOGRAM_VALUE = 4; + /** SUMMARY = 5; */ + public static final int SUMMARY_VALUE = 5; + /** INFO = 6; */ + public static final int INFO_VALUE = 6; + /** STATESET = 7; */ + public static final int STATESET_VALUE = 7; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static MetricType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. 
+ * @return The enum associated with the given numeric wire value. + */ + public static MetricType forNumber(int value) { + switch (value) { + case 0: + return UNKNOWN; + case 1: + return COUNTER; + case 2: + return GAUGE; + case 3: + return HISTOGRAM; + case 4: + return GAUGEHISTOGRAM; + case 5: + return SUMMARY; + case 6: + return INFO; + case 7: + return STATESET; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public MetricType findValueByNumber(int number) { + return MetricType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return Types.MetricMetadata.getDescriptor().getEnumTypes().get(0); + } + + private static final MetricType[] VALUES = values(); + + public static MetricType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private MetricType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:prometheus.MetricMetadata.MetricType) + } + + public static final int TYPE_FIELD_NUMBER = 1; + private int type_ = 0; + + /** + * + * + *
+         * <pre>
+         * Represents the metric type, these match the set from Prometheus.
+         * Refer to github.com/prometheus/common/model/metadata.go for details.
+         * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return The enum numeric value on the wire for type. + */ + @Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+         * <pre>
+         * Represents the metric type, these match the set from Prometheus.
+         * Refer to github.com/prometheus/common/model/metadata.go for details.
+         * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return The type. + */ + @Override + public Types.MetricMetadata.MetricType getType() { + Types.MetricMetadata.MetricType result = + Types.MetricMetadata.MetricType.forNumber(type_); + return result == null ? Types.MetricMetadata.MetricType.UNRECOGNIZED : result; + } + + public static final int METRIC_FAMILY_NAME_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile Object metricFamilyName_ = ""; + + /** + * string metric_family_name = 2; + * + * @return The metricFamilyName. + */ + @Override + public String getMetricFamilyName() { + Object ref = metricFamilyName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + metricFamilyName_ = s; + return s; + } + } + + /** + * string metric_family_name = 2; + * + * @return The bytes for metricFamilyName. + */ + @Override + public com.google.protobuf.ByteString getMetricFamilyNameBytes() { + Object ref = metricFamilyName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + metricFamilyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int HELP_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile Object help_ = ""; + + /** + * string help = 4; + * + * @return The help. + */ + @Override + public String getHelp() { + Object ref = help_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + help_ = s; + return s; + } + } + + /** + * string help = 4; + * + * @return The bytes for help. + */ + @Override + public com.google.protobuf.ByteString getHelpBytes() { + Object ref = help_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + help_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNIT_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile Object unit_ = ""; + + /** + * string unit = 5; + * + * @return The unit. + */ + @Override + public String getUnit() { + Object ref = unit_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + unit_ = s; + return s; + } + } + + /** + * string unit = 5; + * + * @return The bytes for unit. 
+ */ + @Override + public com.google.protobuf.ByteString getUnitBytes() { + Object ref = unit_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + unit_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (type_ != Types.MetricMetadata.MetricType.UNKNOWN.getNumber()) { + output.writeEnum(1, type_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(metricFamilyName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, metricFamilyName_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(help_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, help_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(unit_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, unit_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + if (type_ != Types.MetricMetadata.MetricType.UNKNOWN.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, type_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(metricFamilyName_)) { + size += + com.google.protobuf.GeneratedMessageV3.computeStringSize( + 2, metricFamilyName_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(help_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, help_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(unit_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, unit_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.MetricMetadata)) { + return super.equals(obj); + } + Types.MetricMetadata other = (Types.MetricMetadata) obj; + + if (type_ != other.type_) { + return false; + } + if (!getMetricFamilyName().equals(other.getMetricFamilyName())) { + return false; + } + if (!getHelp().equals(other.getHelp())) { + return false; + } + if (!getUnit().equals(other.getUnit())) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (37 * hash) + METRIC_FAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getMetricFamilyName().hashCode(); + hash = (37 * hash) + HELP_FIELD_NUMBER; + hash = (53 * hash) + getHelp().hashCode(); + hash = (37 * hash) + UNIT_FIELD_NUMBER; + hash = (53 * hash) + getUnit().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.MetricMetadata parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data); + } + + public static Types.MetricMetadata parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.MetricMetadata parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.MetricMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.MetricMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.MetricMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.MetricMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.MetricMetadata parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.MetricMetadata parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.MetricMetadata parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.MetricMetadata parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.MetricMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.MetricMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.MetricMetadata} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.MetricMetadata) + Types.MetricMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_MetricMetadata_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_MetricMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.MetricMetadata.class, Types.MetricMetadata.Builder.class); + } + + // Construct using Types.MetricMetadata.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + type_ = 0; + metricFamilyName_ = ""; + help_ = ""; + unit_ = ""; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_MetricMetadata_descriptor; + } + + @Override + public Types.MetricMetadata getDefaultInstanceForType() { + return Types.MetricMetadata.getDefaultInstance(); + } + + @Override + public Types.MetricMetadata build() { + Types.MetricMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.MetricMetadata buildPartial() { + Types.MetricMetadata result = new Types.MetricMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(Types.MetricMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.metricFamilyName_ = metricFamilyName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.help_ = help_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.unit_ = unit_; + } + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Types.MetricMetadata) { + return mergeFrom((Types.MetricMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.MetricMetadata other) { + if (other == Types.MetricMetadata.getDefaultInstance()) { 
+ return this; + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (!other.getMetricFamilyName().isEmpty()) { + metricFamilyName_ = other.metricFamilyName_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getHelp().isEmpty()) { + help_ = other.help_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getUnit().isEmpty()) { + unit_ = other.unit_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + metricFamilyName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 34: + { + help_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + case 42: + { + unit_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int type_ = 0; + + /** + * + * + *
+             * <pre>
+             * Represents the metric type, these match the set from Prometheus.
+             * Refer to github.com/prometheus/common/model/metadata.go for details.
+             * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return The enum numeric value on the wire for type. + */ + @Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
+             * <pre>
+             * Represents the metric type, these match the set from Prometheus.
+             * Refer to github.com/prometheus/common/model/metadata.go for details.
+             * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+             * <pre>
+             * Represents the metric type, these match the set from Prometheus.
+             * Refer to github.com/prometheus/common/model/metadata.go for details.
+             * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return The type. + */ + @Override + public Types.MetricMetadata.MetricType getType() { + Types.MetricMetadata.MetricType result = + Types.MetricMetadata.MetricType.forNumber(type_); + return result == null ? Types.MetricMetadata.MetricType.UNRECOGNIZED : result; + } + + /** + * + * + *
+             * <pre>
+             * Represents the metric type, these match the set from Prometheus.
+             * Refer to github.com/prometheus/common/model/metadata.go for details.
+             * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(Types.MetricMetadata.MetricType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
+             * <pre>
+             * Represents the metric type, these match the set from Prometheus.
+             * Refer to github.com/prometheus/common/model/metadata.go for details.
+             * </pre>
+ * + * .prometheus.MetricMetadata.MetricType type = 1; + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = 0; + onChanged(); + return this; + } + + private Object metricFamilyName_ = ""; + + /** + * string metric_family_name = 2; + * + * @return The metricFamilyName. + */ + public String getMetricFamilyName() { + Object ref = metricFamilyName_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + metricFamilyName_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * string metric_family_name = 2; + * + * @return The bytes for metricFamilyName. + */ + public com.google.protobuf.ByteString getMetricFamilyNameBytes() { + Object ref = metricFamilyName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + metricFamilyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string metric_family_name = 2; + * + * @param value The metricFamilyName to set. + * @return This builder for chaining. + */ + public Builder setMetricFamilyName(String value) { + if (value == null) { + throw new NullPointerException(); + } + metricFamilyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * string metric_family_name = 2; + * + * @return This builder for chaining. + */ + public Builder clearMetricFamilyName() { + metricFamilyName_ = getDefaultInstance().getMetricFamilyName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * string metric_family_name = 2; + * + * @param value The bytes for metricFamilyName to set. + * @return This builder for chaining. + */ + public Builder setMetricFamilyNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + metricFamilyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private Object help_ = ""; + + /** + * string help = 4; + * + * @return The help. + */ + public String getHelp() { + Object ref = help_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + help_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * string help = 4; + * + * @return The bytes for help. + */ + public com.google.protobuf.ByteString getHelpBytes() { + Object ref = help_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + help_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string help = 4; + * + * @param value The help to set. + * @return This builder for chaining. + */ + public Builder setHelp(String value) { + if (value == null) { + throw new NullPointerException(); + } + help_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * string help = 4; + * + * @return This builder for chaining. + */ + public Builder clearHelp() { + help_ = getDefaultInstance().getHelp(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * string help = 4; + * + * @param value The bytes for help to set. + * @return This builder for chaining. 
+ */ + public Builder setHelpBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + help_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private Object unit_ = ""; + + /** + * string unit = 5; + * + * @return The unit. + */ + public String getUnit() { + Object ref = unit_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + unit_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * string unit = 5; + * + * @return The bytes for unit. + */ + public com.google.protobuf.ByteString getUnitBytes() { + Object ref = unit_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + unit_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string unit = 5; + * + * @param value The unit to set. + * @return This builder for chaining. + */ + public Builder setUnit(String value) { + if (value == null) { + throw new NullPointerException(); + } + unit_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * string unit = 5; + * + * @return This builder for chaining. + */ + public Builder clearUnit() { + unit_ = getDefaultInstance().getUnit(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * string unit = 5; + * + * @param value The bytes for unit to set. + * @return This builder for chaining. + */ + public Builder setUnitBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + unit_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.MetricMetadata) + } + + // @@protoc_insertion_point(class_scope:prometheus.MetricMetadata) + private static final Types.MetricMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.MetricMetadata(); + } + + public static Types.MetricMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public MetricMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Types.MetricMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface SampleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.Sample) + com.google.protobuf.MessageOrBuilder { + + /** + * double value = 1; + * + * @return The value. + */ + double getValue(); + + /** + * + * + *
+         * <pre>
+         * timestamp is in ms format, see model/timestamp/timestamp.go for
+         * conversion from time.Time to Prometheus timestamp.
+         * </pre>
+ * + * int64 timestamp = 2; + * + * @return The timestamp. + */ + long getTimestamp(); + } + + /** Protobuf type {@code prometheus.Sample} */ + public static final class Sample extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.Sample) + SampleOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Sample.newBuilder() to construct. + private Sample(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Sample() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new Sample(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Sample_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Sample_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Sample.class, Types.Sample.Builder.class); + } + + public static final int VALUE_FIELD_NUMBER = 1; + private double value_ = 0D; + + /** + * double value = 1; + * + * @return The value. + */ + @Override + public double getValue() { + return value_; + } + + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_ = 0L; + + /** + * + * + *
+         * <pre>
+         * timestamp is in ms format, see model/timestamp/timestamp.go for
+         * conversion from time.Time to Prometheus timestamp.
+         * </pre>
+ * + * int64 timestamp = 2; + * + * @return The timestamp. + */ + @Override + public long getTimestamp() { + return timestamp_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (Double.doubleToRawLongBits(value_) != 0) { + output.writeDouble(1, value_); + } + if (timestamp_ != 0L) { + output.writeInt64(2, timestamp_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + if (Double.doubleToRawLongBits(value_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(1, value_); + } + if (timestamp_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.Sample)) { + return super.equals(obj); + } + Types.Sample other = (Types.Sample) obj; + + if (Double.doubleToLongBits(getValue()) != Double.doubleToLongBits(other.getValue())) { + return false; + } + if (getTimestamp() != other.getTimestamp()) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + Double.doubleToLongBits(getValue())); + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTimestamp()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.Sample parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Sample parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Sample parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Sample parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Sample parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Sample parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Sample parseFrom(java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Sample parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Sample parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.Sample parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Sample parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Sample parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.Sample prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.Sample} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.Sample) + Types.SampleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Sample_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Sample_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Sample.class, Types.Sample.Builder.class); + } + + // Construct using Types.Sample.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + value_ = 0D; + timestamp_ = 0L; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_Sample_descriptor; + } + + @Override + public Types.Sample getDefaultInstanceForType() { + return Types.Sample.getDefaultInstance(); + } + + @Override + public Types.Sample build() { + Types.Sample result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.Sample buildPartial() { + Types.Sample result = new Types.Sample(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(Types.Sample 
result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.value_ = value_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.timestamp_ = timestamp_; + } + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Types.Sample) { + return mergeFrom((Types.Sample) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.Sample other) { + if (other == Types.Sample.getDefaultInstance()) { + return this; + } + if (other.getValue() != 0D) { + setValue(other.getValue()); + } + if (other.getTimestamp() != 0L) { + setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 9: + { + value_ = input.readDouble(); + bitField0_ |= 0x00000001; + break; + } // case 9 + case 16: + { + timestamp_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private double value_; + + /** + * double value = 1; + * + * @return The value. + */ + @Override + public double getValue() { + return value_; + } + + /** + * double value = 1; + * + * @param value The value to set. + * @return This builder for chaining. + */ + public Builder setValue(double value) { + + value_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * double value = 1; + * + * @return This builder for chaining. + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0D; + onChanged(); + return this; + } + + private long timestamp_; + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
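For illustration, a minimal sketch of producing a prometheus.Sample with a millisecond timestamp, assuming the generated Types classes from this connector are compiled onto the classpath:

    // Prometheus expects Unix time in milliseconds, which is exactly
    // what System.currentTimeMillis() returns.
    Types.Sample sample =
            Types.Sample.newBuilder()
                    .setValue(42.0)                           // observed metric value
                    .setTimestamp(System.currentTimeMillis()) // ms since epoch
                    .build();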
+ * + * int64 timestamp = 2; + * + * @return The timestamp. + */ + @Override + public long getTimestamp() { + return timestamp_; + } + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 2; + * + * @param value The timestamp to set. + * @return This builder for chaining. + */ + public Builder setTimestamp(long value) { + + timestamp_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 2; + * + * @return This builder for chaining. + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.Sample) + } + + // @@protoc_insertion_point(class_scope:prometheus.Sample) + private static final Types.Sample DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.Sample(); + } + + public static Types.Sample getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public Sample parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Types.Sample getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ExemplarOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.Exemplar) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + java.util.List getLabelsList(); + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + Types.Label getLabels(int index); + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + int getLabelsCount(); + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + java.util.List getLabelsOrBuilderList(); + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + Types.LabelOrBuilder getLabelsOrBuilder(int index); + + /** + * double value = 2; + * + * @return The value. + */ + double getValue(); + + /** + * + * + *
+         * timestamp is in ms format, see model/timestamp/timestamp.go for
+         * conversion from time.Time to Prometheus timestamp.
+         * 
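A minimal sketch of building an Exemplar, assuming Types.Label (not shown in this excerpt) follows the usual generated pattern with setName/setValue setters:

    // Labels are optional and may be empty; a trace_id label is the
    // typical use for linking an observation to a trace.
    Types.Exemplar exemplar =
            Types.Exemplar.newBuilder()
                    .addLabels(
                            Types.Label.newBuilder()
                                    .setName("trace_id")   // assumed Label API
                                    .setValue("abc123")
                                    .build())
                    .setValue(0.237)                       // observed value
                    .setTimestamp(System.currentTimeMillis())
                    .build();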
+ * + * int64 timestamp = 3; + * + * @return The timestamp. + */ + long getTimestamp(); + } + + /** Protobuf type {@code prometheus.Exemplar} */ + public static final class Exemplar extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.Exemplar) + ExemplarOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Exemplar.newBuilder() to construct. + private Exemplar(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Exemplar() { + labels_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new Exemplar(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Exemplar_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Exemplar_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Exemplar.class, Types.Exemplar.Builder.class); + } + + public static final int LABELS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List labels_; + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public java.util.List getLabelsList() { + return labels_; + } + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public java.util.List getLabelsOrBuilderList() { + return labels_; + } + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public int getLabelsCount() { + return labels_.size(); + } + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public Types.Label getLabels(int index) { + return labels_.get(index); + } + + /** + * + * + *
+         * Optional, can be empty.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public Types.LabelOrBuilder getLabelsOrBuilder(int index) { + return labels_.get(index); + } + + public static final int VALUE_FIELD_NUMBER = 2; + private double value_ = 0D; + + /** + * double value = 2; + * + * @return The value. + */ + @Override + public double getValue() { + return value_; + } + + public static final int TIMESTAMP_FIELD_NUMBER = 3; + private long timestamp_ = 0L; + + /** + * + * + *
+         * timestamp is in ms format, see model/timestamp/timestamp.go for
+         * conversion from time.Time to Prometheus timestamp.
+         * 
+ * + * int64 timestamp = 3; + * + * @return The timestamp. + */ + @Override + public long getTimestamp() { + return timestamp_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < labels_.size(); i++) { + output.writeMessage(1, labels_.get(i)); + } + if (Double.doubleToRawLongBits(value_) != 0) { + output.writeDouble(2, value_); + } + if (timestamp_ != 0L) { + output.writeInt64(3, timestamp_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < labels_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, labels_.get(i)); + } + if (Double.doubleToRawLongBits(value_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, value_); + } + if (timestamp_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.Exemplar)) { + return super.equals(obj); + } + Types.Exemplar other = (Types.Exemplar) obj; + + if (!getLabelsList().equals(other.getLabelsList())) { + return false; + } + if (Double.doubleToLongBits(getValue()) != Double.doubleToLongBits(other.getValue())) { + return false; + } + if (getTimestamp() != other.getTimestamp()) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getLabelsCount() > 0) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + getLabelsList().hashCode(); + } + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + Double.doubleToLongBits(getValue())); + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTimestamp()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.Exemplar parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Exemplar parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Exemplar parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Exemplar parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
Types.Exemplar parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Exemplar parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Exemplar parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Exemplar parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Exemplar parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.Exemplar parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Exemplar parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Exemplar parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.Exemplar prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.Exemplar} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.Exemplar) + Types.ExemplarOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Exemplar_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Exemplar_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Exemplar.class, Types.Exemplar.Builder.class); + } + + // Construct using Types.Exemplar.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (labelsBuilder_ == null) { + labels_ = java.util.Collections.emptyList(); + } else { + labels_ = null; + labelsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0D; + timestamp_ = 0L; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_Exemplar_descriptor; + } + + @Override + public Types.Exemplar getDefaultInstanceForType() { + return Types.Exemplar.getDefaultInstance(); + } + + @Override + public Types.Exemplar build() { + Types.Exemplar result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.Exemplar buildPartial() { + Types.Exemplar result = new Types.Exemplar(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Types.Exemplar result) { + if (labelsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + labels_ = java.util.Collections.unmodifiableList(labels_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.labels_ = labels_; + } else { + result.labels_ = labelsBuilder_.build(); + } + } + + private void buildPartial0(Types.Exemplar result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.value_ = value_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.timestamp_ = timestamp_; + } + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other 
instanceof Types.Exemplar) { + return mergeFrom((Types.Exemplar) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.Exemplar other) { + if (other == Types.Exemplar.getDefaultInstance()) { + return this; + } + if (labelsBuilder_ == null) { + if (!other.labels_.isEmpty()) { + if (labels_.isEmpty()) { + labels_ = other.labels_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureLabelsIsMutable(); + labels_.addAll(other.labels_); + } + onChanged(); + } + } else { + if (!other.labels_.isEmpty()) { + if (labelsBuilder_.isEmpty()) { + labelsBuilder_.dispose(); + labelsBuilder_ = null; + labels_ = other.labels_; + bitField0_ = (bitField0_ & ~0x00000001); + labelsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getLabelsFieldBuilder() + : null; + } else { + labelsBuilder_.addAllMessages(other.labels_); + } + } + } + if (other.getValue() != 0D) { + setValue(other.getValue()); + } + if (other.getTimestamp() != 0L) { + setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Types.Label m = + input.readMessage( + Types.Label.parser(), extensionRegistry); + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.add(m); + } else { + labelsBuilder_.addMessage(m); + } + break; + } // case 10 + case 17: + { + value_ = input.readDouble(); + bitField0_ |= 0x00000002; + break; + } // case 17 + case 24: + { + timestamp_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List labels_ = java.util.Collections.emptyList(); + + private void ensureLabelsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + labels_ = new java.util.ArrayList(labels_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Label, Types.Label.Builder, Types.LabelOrBuilder> + labelsBuilder_; + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public java.util.List getLabelsList() { + if (labelsBuilder_ == null) { + return java.util.Collections.unmodifiableList(labels_); + } else { + return labelsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public int getLabelsCount() { + if (labelsBuilder_ == null) { + return labels_.size(); + } else { + return labelsBuilder_.getCount(); + } + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label getLabels(int index) { + if (labelsBuilder_ == null) { + return labels_.get(index); + } else { + return labelsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder setLabels(int index, Types.Label value) { + if (labelsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelsIsMutable(); + labels_.set(index, value); + onChanged(); + } else { + labelsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder setLabels(int index, Types.Label.Builder builderForValue) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.set(index, builderForValue.build()); + onChanged(); + } else { + labelsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(Types.Label value) { + if (labelsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelsIsMutable(); + labels_.add(value); + onChanged(); + } else { + labelsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(int index, Types.Label value) { + if (labelsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelsIsMutable(); + labels_.add(index, value); + onChanged(); + } else { + labelsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(Types.Label.Builder builderForValue) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.add(builderForValue.build()); + onChanged(); + } else { + labelsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(int index, Types.Label.Builder builderForValue) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.add(index, builderForValue.build()); + onChanged(); + } else { + labelsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addAllLabels(Iterable values) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, labels_); + onChanged(); + } else { + labelsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder clearLabels() { + if (labelsBuilder_ == null) { + labels_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + labelsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder removeLabels(int index) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.remove(index); + onChanged(); + } else { + labelsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label.Builder getLabelsBuilder(int index) { + return getLabelsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.LabelOrBuilder getLabelsOrBuilder(int index) { + if (labelsBuilder_ == null) { + return labels_.get(index); + } else { + return labelsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public java.util.List getLabelsOrBuilderList() { + if (labelsBuilder_ != null) { + return labelsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(labels_); + } + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label.Builder addLabelsBuilder() { + return getLabelsFieldBuilder().addBuilder(Types.Label.getDefaultInstance()); + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label.Builder addLabelsBuilder(int index) { + return getLabelsFieldBuilder().addBuilder(index, Types.Label.getDefaultInstance()); + } + + /** + * + * + *
+             * Optional, can be empty.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public java.util.List getLabelsBuilderList() { + return getLabelsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Label, Types.Label.Builder, Types.LabelOrBuilder> + getLabelsFieldBuilder() { + if (labelsBuilder_ == null) { + labelsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.Label, Types.Label.Builder, Types.LabelOrBuilder>( + labels_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + labels_ = null; + } + return labelsBuilder_; + } + + private double value_; + + /** + * double value = 2; + * + * @return The value. + */ + @Override + public double getValue() { + return value_; + } + + /** + * double value = 2; + * + * @param value The value to set. + * @return This builder for chaining. + */ + public Builder setValue(double value) { + + value_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * double value = 2; + * + * @return This builder for chaining. + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = 0D; + onChanged(); + return this; + } + + private long timestamp_; + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 3; + * + * @return The timestamp. + */ + @Override + public long getTimestamp() { + return timestamp_; + } + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 3; + * + * @param value The timestamp to set. + * @return This builder for chaining. + */ + public Builder setTimestamp(long value) { + + timestamp_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 3; + * + * @return This builder for chaining. + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000004); + timestamp_ = 0L; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.Exemplar) + } + + // @@protoc_insertion_point(class_scope:prometheus.Exemplar) + private static final Types.Exemplar DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.Exemplar(); + } + + public static Types.Exemplar getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public Exemplar parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Types.Exemplar getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface HistogramOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.Histogram) + com.google.protobuf.MessageOrBuilder { + + /** + * uint64 count_int = 1; + * + * @return Whether the countInt field is set. + */ + boolean hasCountInt(); + + /** + * uint64 count_int = 1; + * + * @return The countInt. + */ + long getCountInt(); + + /** + * double count_float = 2; + * + * @return Whether the countFloat field is set. + */ + boolean hasCountFloat(); + + /** + * double count_float = 2; + * + * @return The countFloat. + */ + double getCountFloat(); + + /** + * + * + *
+         * Sum of observations in the histogram.
+         * 
+ * + * double sum = 3; + * + * @return The sum. + */ + double getSum(); + + /** + * + * + *
+         * The schema defines the bucket schema. Currently, valid numbers
+         * are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+         * is a bucket boundary in each case, and then each power of two is
+         * divided into 2^n logarithmic buckets. Or in other words, each
+         * bucket boundary is the previous boundary times 2^(2^-n). In the
+         * future, more bucket schemas may be added using numbers < -4 or >
+         * 8.
+         * 
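The growth factor implied by a schema n can be computed directly; for example, schema = 3 yields 2^(1/8), about 1.0905, i.e. each bucket boundary sits roughly 9% above the previous one. A small sketch:

    // Consecutive bucket boundaries grow by 2^(2^-schema).
    int schema = 3;
    double growthFactor = Math.pow(2, Math.pow(2, -schema)); // ~1.0905
    // With a boundary at 1, the i-th boundary above it is growthFactor^i:
    double tenthBoundary = Math.pow(growthFactor, 10);       // 2^(10/8) ~= 2.3784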
+ * + * sint32 schema = 4; + * + * @return The schema. + */ + int getSchema(); + + /** + * + * + *
+         * Breadth of the zero bucket.
+         * 
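In other words, observations whose absolute value is at most zero_threshold are counted in the zero bucket rather than in a regular bucket. A hedged sketch, assuming the standard generated setters:

    // Everything with |v| <= zero_threshold lands in the zero bucket,
    // counted by the zero_count oneof (zero_count_int here).
    Types.Histogram h =
            Types.Histogram.newBuilder()
                    .setZeroThreshold(1e-9) // example width, not a default
                    .setZeroCountInt(3)
                    .build();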
+ * + * double zero_threshold = 5; + * + * @return The zeroThreshold. + */ + double getZeroThreshold(); + + /** + * uint64 zero_count_int = 6; + * + * @return Whether the zeroCountInt field is set. + */ + boolean hasZeroCountInt(); + + /** + * uint64 zero_count_int = 6; + * + * @return The zeroCountInt. + */ + long getZeroCountInt(); + + /** + * double zero_count_float = 7; + * + * @return Whether the zeroCountFloat field is set. + */ + boolean hasZeroCountFloat(); + + /** + * double zero_count_float = 7; + * + * @return The zeroCountFloat. + */ + double getZeroCountFloat(); + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getNegativeSpansList(); + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + Types.BucketSpan getNegativeSpans(int index); + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + int getNegativeSpansCount(); + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getNegativeSpansOrBuilderList(); + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + Types.BucketSpanOrBuilder getNegativeSpansOrBuilder(int index); + + /** + * + * + *
+         * Use either "negative_deltas" or "negative_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
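Delta encoding stores the first bucket count verbatim and each subsequent entry as the difference from the previous bucket, so [3, 2, -1] decodes to absolute counts [3, 5, 4]. A sketch of the decoding loop:

    // Decode sint64 bucket deltas into absolute integer counts.
    long[] deltas = {3, 2, -1};
    long[] counts = new long[deltas.length];
    long running = 0;
    for (int i = 0; i < deltas.length; i++) {
        running += deltas[i];
        counts[i] = running;
    }
    // counts == {3, 5, 4}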
+ * + * repeated sint64 negative_deltas = 9; + * + * @return A list containing the negativeDeltas. + */ + java.util.List getNegativeDeltasList(); + + /** + * + * + *
+         * Use either "negative_deltas" or "negative_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @return The count of negativeDeltas. + */ + int getNegativeDeltasCount(); + + /** + * + * + *
+         * Use either "negative_deltas" or "negative_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @param index The index of the element to return. + * @return The negativeDeltas at the given index. + */ + long getNegativeDeltas(int index); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double negative_counts = 10; + * + * @return A list containing the negativeCounts. + */ + java.util.List getNegativeCountsList(); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double negative_counts = 10; + * + * @return The count of negativeCounts. + */ + int getNegativeCountsCount(); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double negative_counts = 10; + * + * @param index The index of the element to return. + * @return The negativeCounts at the given index. + */ + double getNegativeCounts(int index); + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getPositiveSpansList(); + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + Types.BucketSpan getPositiveSpans(int index); + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + int getPositiveSpansCount(); + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getPositiveSpansOrBuilderList(); + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + Types.BucketSpanOrBuilder getPositiveSpansOrBuilder(int index); + + /** + * + * + *
+         * Use either "positive_deltas" or "positive_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @return A list containing the positiveDeltas. + */ + java.util.List getPositiveDeltasList(); + + /** + * + * + *
+         * Use either "positive_deltas" or "positive_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @return The count of positiveDeltas. + */ + int getPositiveDeltasCount(); + + /** + * + * + *
+         * Use either "positive_deltas" or "positive_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @param index The index of the element to return. + * @return The positiveDeltas at the given index. + */ + long getPositiveDeltas(int index); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double positive_counts = 13; + * + * @return A list containing the positiveCounts. + */ + java.util.List getPositiveCountsList(); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double positive_counts = 13; + * + * @return The count of positiveCounts. + */ + int getPositiveCountsCount(); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double positive_counts = 13; + * + * @param index The index of the element to return. + * @return The positiveCounts at the given index. + */ + double getPositiveCounts(int index); + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return The enum numeric value on the wire for resetHint. + */ + int getResetHintValue(); + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return The resetHint. + */ + Types.Histogram.ResetHint getResetHint(); + + /** + * + * + *
+         * timestamp is in ms format, see model/timestamp/timestamp.go for
+         * conversion from time.Time to Prometheus timestamp.
+         * 
+ * + * int64 timestamp = 15; + * + * @return The timestamp. + */ + long getTimestamp(); + + Types.Histogram.CountCase getCountCase(); + + Types.Histogram.ZeroCountCase getZeroCountCase(); + } + + /** + * + * + *
+     * A native histogram, also known as a sparse histogram.
+     * Original design doc:
+     * https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+     * The appendix of this design doc also explains the concept of float
+     * histograms. This Histogram message can represent both the usual
+     * integer histogram and a float histogram.
+     * 
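Which flavour a given message represents is decided by the count oneof (count_int vs. count_float, mirrored by the zero_count oneof). A minimal sketch, assuming the standard generated oneof setters:

    // Integer histogram: count_int is set and buckets use *_deltas.
    Types.Histogram intHist =
            Types.Histogram.newBuilder()
                    .setCountInt(12)
                    .setSum(47.5)
                    .setSchema(3)
                    .build();

    // Float histogram: same message type, but count_float is set and
    // buckets would use the absolute *_counts fields instead.
    Types.Histogram floatHist =
            Types.Histogram.newBuilder()
                    .setCountFloat(12.0)
                    .setSum(47.5)
                    .setSchema(3)
                    .build();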
+ * + *
Protobuf type {@code prometheus.Histogram} + */ + public static final class Histogram extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.Histogram) + HistogramOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Histogram.newBuilder() to construct. + private Histogram(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Histogram() { + negativeSpans_ = java.util.Collections.emptyList(); + negativeDeltas_ = emptyLongList(); + negativeCounts_ = emptyDoubleList(); + positiveSpans_ = java.util.Collections.emptyList(); + positiveDeltas_ = emptyLongList(); + positiveCounts_ = emptyDoubleList(); + resetHint_ = 0; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new Histogram(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Histogram_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Histogram_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Histogram.class, Types.Histogram.Builder.class); + } + + /** Protobuf enum {@code prometheus.Histogram.ResetHint} */ + public enum ResetHint implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+             * Need to test for a counter reset explicitly.
+             * 
+ * + * UNKNOWN = 0; + */ + UNKNOWN(0), + /** + * + * + *
+             * This is the 1st histogram after a counter reset.
+             * 
+ * + * YES = 1; + */ + YES(1), + /** + * + * + *
+             * There was no counter reset between this and the previous Histogram.
+             * 
+ * + * NO = 2; + */ + NO(2), + /** + * + * + *
+             * This is a gauge histogram where counter resets don't happen.
+             * 
+ * + * GAUGE = 3; + */ + GAUGE(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+             * Need to test for a counter reset explicitly.
+             * 
+ * + * UNKNOWN = 0; + */ + public static final int UNKNOWN_VALUE = 0; + /** + * + * + *
+             * This is the 1st histogram after a counter reset.
+             * 
+ * + * YES = 1; + */ + public static final int YES_VALUE = 1; + /** + * + * + *
+             * There was no counter reset between this and the previous Histogram.
+             * 
+ * + * NO = 2; + */ + public static final int NO_VALUE = 2; + /** + * + * + *
+             * This is a gauge histogram where counter resets don't happen.
+             * 
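A producer that emits gauge histograms would set this hint explicitly; a one-line sketch, assuming the generated setResetHint setter:

    // Counter resets cannot happen for gauge histograms, so say so.
    Types.Histogram gaugeHist =
            Types.Histogram.newBuilder()
                    .setResetHint(Types.Histogram.ResetHint.GAUGE)
                    .build();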
+ * + * GAUGE = 3; + */ + public static final int GAUGE_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static ResetHint valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ResetHint forNumber(int value) { + switch (value) { + case 0: + return UNKNOWN; + case 1: + return YES; + case 2: + return NO; + case 3: + return GAUGE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ResetHint findValueByNumber(int number) { + return ResetHint.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return Types.Histogram.getDescriptor().getEnumTypes().get(0); + } + + private static final ResetHint[] VALUES = values(); + + public static ResetHint valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ResetHint(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:prometheus.Histogram.ResetHint) + } + + private int countCase_ = 0; + + @SuppressWarnings("serial") + private Object count_; + + public enum CountCase implements com.google.protobuf.Internal.EnumLite, InternalOneOfEnum { + COUNT_INT(1), + COUNT_FLOAT(2), + COUNT_NOT_SET(0); + private final int value; + + private CountCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @Deprecated + public static CountCase valueOf(int value) { + return forNumber(value); + } + + public static CountCase forNumber(int value) { + switch (value) { + case 1: + return COUNT_INT; + case 2: + return COUNT_FLOAT; + case 0: + return COUNT_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public CountCase getCountCase() { + return CountCase.forNumber(countCase_); + } + + private int zeroCountCase_ = 0; + + @SuppressWarnings("serial") + private Object zeroCount_; + + public enum ZeroCountCase + implements com.google.protobuf.Internal.EnumLite, InternalOneOfEnum { + ZERO_COUNT_INT(6), + ZERO_COUNT_FLOAT(7), + ZEROCOUNT_NOT_SET(0); + private final int value; + + private ZeroCountCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static ZeroCountCase valueOf(int value) { + return forNumber(value); + } + + public static ZeroCountCase forNumber(int value) { + switch (value) { + case 6: + return ZERO_COUNT_INT; + case 7: + return ZERO_COUNT_FLOAT; + case 0: + return ZEROCOUNT_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ZeroCountCase getZeroCountCase() { + return ZeroCountCase.forNumber(zeroCountCase_); + } + + public static final int COUNT_INT_FIELD_NUMBER = 1; + + /** + * uint64 count_int = 1; + * + * @return Whether the countInt field is set. + */ + @Override + public boolean hasCountInt() { + return countCase_ == 1; + } + + /** + * uint64 count_int = 1; + * + * @return The countInt. + */ + @Override + public long getCountInt() { + if (countCase_ == 1) { + return (Long) count_; + } + return 0L; + } + + public static final int COUNT_FLOAT_FIELD_NUMBER = 2; + + /** + * double count_float = 2; + * + * @return Whether the countFloat field is set. + */ + @Override + public boolean hasCountFloat() { + return countCase_ == 2; + } + + /** + * double count_float = 2; + * + * @return The countFloat. + */ + @Override + public double getCountFloat() { + if (countCase_ == 2) { + return (Double) count_; + } + return 0D; + } + + public static final int SUM_FIELD_NUMBER = 3; + private double sum_ = 0D; + + /** + * + * + *
+         * Sum of observations in the histogram.
+         * 
+ * + * double sum = 3; + * + * @return The sum. + */ + @Override + public double getSum() { + return sum_; + } + + public static final int SCHEMA_FIELD_NUMBER = 4; + private int schema_ = 0; + + /** + * + * + *
+         * The schema defines the bucket schema. Currently, valid numbers
+         * are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+         * is a bucket boundary in each case, and then each power of two is
+         * divided into 2^n logarithmic buckets. Or in other words, each
+         * bucket boundary is the previous boundary times 2^(2^-n). In the
+         * future, more bucket schemas may be added using numbers < -4 or >
+         * 8.
+         * 
+ * + * sint32 schema = 4; + * + * @return The schema. + */ + @Override + public int getSchema() { + return schema_; + } + + public static final int ZERO_THRESHOLD_FIELD_NUMBER = 5; + private double zeroThreshold_ = 0D; + + /** + * + * + *
+         * Breadth of the zero bucket.
+         * 
+ * + * double zero_threshold = 5; + * + * @return The zeroThreshold. + */ + @Override + public double getZeroThreshold() { + return zeroThreshold_; + } + + public static final int ZERO_COUNT_INT_FIELD_NUMBER = 6; + + /** + * uint64 zero_count_int = 6; + * + * @return Whether the zeroCountInt field is set. + */ + @Override + public boolean hasZeroCountInt() { + return zeroCountCase_ == 6; + } + + /** + * uint64 zero_count_int = 6; + * + * @return The zeroCountInt. + */ + @Override + public long getZeroCountInt() { + if (zeroCountCase_ == 6) { + return (Long) zeroCount_; + } + return 0L; + } + + public static final int ZERO_COUNT_FLOAT_FIELD_NUMBER = 7; + + /** + * double zero_count_float = 7; + * + * @return Whether the zeroCountFloat field is set. + */ + @Override + public boolean hasZeroCountFloat() { + return zeroCountCase_ == 7; + } + + /** + * double zero_count_float = 7; + * + * @return The zeroCountFloat. + */ + @Override + public double getZeroCountFloat() { + if (zeroCountCase_ == 7) { + return (Double) zeroCount_; + } + return 0D; + } + + public static final int NEGATIVE_SPANS_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private java.util.List negativeSpans_; + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getNegativeSpansList() { + return negativeSpans_; + } + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getNegativeSpansOrBuilderList() { + return negativeSpans_; + } + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + @Override + public int getNegativeSpansCount() { + return negativeSpans_.size(); + } + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.BucketSpan getNegativeSpans(int index) { + return negativeSpans_.get(index); + } + + /** + * + * + *
+         * Negative Buckets.
+         * 
+ * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.BucketSpanOrBuilder getNegativeSpansOrBuilder(int index) { + return negativeSpans_.get(index); + } + + public static final int NEGATIVE_DELTAS_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.LongList negativeDeltas_ = emptyLongList(); + + /** + * + * + *
+         * Use either "negative_deltas" or "negative_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @return A list containing the negativeDeltas. + */ + @Override + public java.util.List getNegativeDeltasList() { + return negativeDeltas_; + } + + /** + * + * + *
+         * Use either "negative_deltas" or "negative_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @return The count of negativeDeltas. + */ + public int getNegativeDeltasCount() { + return negativeDeltas_.size(); + } + + /** + * + * + *
+         * Use either "negative_deltas" or "negative_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @param index The index of the element to return. + * @return The negativeDeltas at the given index. + */ + public long getNegativeDeltas(int index) { + return negativeDeltas_.getLong(index); + } + + private int negativeDeltasMemoizedSerializedSize = -1; + + public static final int NEGATIVE_COUNTS_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.DoubleList negativeCounts_ = emptyDoubleList(); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double negative_counts = 10; + * + * @return A list containing the negativeCounts. + */ + @Override + public java.util.List getNegativeCountsList() { + return negativeCounts_; + } + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double negative_counts = 10; + * + * @return The count of negativeCounts. + */ + public int getNegativeCountsCount() { + return negativeCounts_.size(); + } + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double negative_counts = 10; + * + * @param index The index of the element to return. + * @return The negativeCounts at the given index. + */ + public double getNegativeCounts(int index) { + return negativeCounts_.getDouble(index); + } + + private int negativeCountsMemoizedSerializedSize = -1; + + public static final int POSITIVE_SPANS_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private java.util.List positiveSpans_; + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getPositiveSpansList() { + return positiveSpans_; + } + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getPositiveSpansOrBuilderList() { + return positiveSpans_; + } + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + @Override + public int getPositiveSpansCount() { + return positiveSpans_.size(); + } + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.BucketSpan getPositiveSpans(int index) { + return positiveSpans_.get(index); + } + + /** + * + * + *
+         * Positive Buckets.
+         * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.BucketSpanOrBuilder getPositiveSpansOrBuilder(int index) { + return positiveSpans_.get(index); + } + + public static final int POSITIVE_DELTAS_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.LongList positiveDeltas_ = emptyLongList(); + + /** + * + * + *
+         * Use either "positive_deltas" or "positive_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @return A list containing the positiveDeltas. + */ + @Override + public java.util.List getPositiveDeltasList() { + return positiveDeltas_; + } + + /** + * + * + *
+         * Use either "positive_deltas" or "positive_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @return The count of positiveDeltas. + */ + public int getPositiveDeltasCount() { + return positiveDeltas_.size(); + } + + /** + * + * + *
+         * Use either "positive_deltas" or "positive_counts", the former for
+         * regular histograms with integer counts, the latter for float
+         * histograms.
+         * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @param index The index of the element to return. + * @return The positiveDeltas at the given index. + */ + public long getPositiveDeltas(int index) { + return positiveDeltas_.getLong(index); + } + + private int positiveDeltasMemoizedSerializedSize = -1; + + public static final int POSITIVE_COUNTS_FIELD_NUMBER = 13; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.DoubleList positiveCounts_ = emptyDoubleList(); + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double positive_counts = 13; + * + * @return A list containing the positiveCounts. + */ + @Override + public java.util.List getPositiveCountsList() { + return positiveCounts_; + } + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double positive_counts = 13; + * + * @return The count of positiveCounts. + */ + public int getPositiveCountsCount() { + return positiveCounts_.size(); + } + + /** + * + * + *
+         * Absolute count of each bucket.
+         * 
+ * + * repeated double positive_counts = 13; + * + * @param index The index of the element to return. + * @return The positiveCounts at the given index. + */ + public double getPositiveCounts(int index) { + return positiveCounts_.getDouble(index); + } + + private int positiveCountsMemoizedSerializedSize = -1; + + public static final int RESET_HINT_FIELD_NUMBER = 14; + private int resetHint_ = 0; + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return The enum numeric value on the wire for resetHint. + */ + @Override + public int getResetHintValue() { + return resetHint_; + } + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return The resetHint. + */ + @Override + public Types.Histogram.ResetHint getResetHint() { + Types.Histogram.ResetHint result = Types.Histogram.ResetHint.forNumber(resetHint_); + return result == null ? Types.Histogram.ResetHint.UNRECOGNIZED : result; + } + + public static final int TIMESTAMP_FIELD_NUMBER = 15; + private long timestamp_ = 0L; + + /** + * + * + *
+         * timestamp is in ms format, see model/timestamp/timestamp.go for
+         * conversion from time.Time to Prometheus timestamp.
+         * 
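Because the field is a plain millisecond epoch, a caller filling in this message on the JVM can use System.currentTimeMillis() directly; an illustrative snippet using the builder methods defined in this class:

    // timestamp is milliseconds since the Unix epoch, matching
    // System.currentTimeMillis().
    Types.Histogram.Builder builder =
            Types.Histogram.newBuilder().setTimestamp(System.currentTimeMillis());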
+ * + * int64 timestamp = 15; + * + * @return The timestamp. + */ + @Override + public long getTimestamp() { + return timestamp_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (countCase_ == 1) { + output.writeUInt64(1, (long) ((Long) count_)); + } + if (countCase_ == 2) { + output.writeDouble(2, (double) ((Double) count_)); + } + if (Double.doubleToRawLongBits(sum_) != 0) { + output.writeDouble(3, sum_); + } + if (schema_ != 0) { + output.writeSInt32(4, schema_); + } + if (Double.doubleToRawLongBits(zeroThreshold_) != 0) { + output.writeDouble(5, zeroThreshold_); + } + if (zeroCountCase_ == 6) { + output.writeUInt64(6, (long) ((Long) zeroCount_)); + } + if (zeroCountCase_ == 7) { + output.writeDouble(7, (double) ((Double) zeroCount_)); + } + for (int i = 0; i < negativeSpans_.size(); i++) { + output.writeMessage(8, negativeSpans_.get(i)); + } + if (getNegativeDeltasList().size() > 0) { + output.writeUInt32NoTag(74); + output.writeUInt32NoTag(negativeDeltasMemoizedSerializedSize); + } + for (int i = 0; i < negativeDeltas_.size(); i++) { + output.writeSInt64NoTag(negativeDeltas_.getLong(i)); + } + if (getNegativeCountsList().size() > 0) { + output.writeUInt32NoTag(82); + output.writeUInt32NoTag(negativeCountsMemoizedSerializedSize); + } + for (int i = 0; i < negativeCounts_.size(); i++) { + output.writeDoubleNoTag(negativeCounts_.getDouble(i)); + } + for (int i = 0; i < positiveSpans_.size(); i++) { + output.writeMessage(11, positiveSpans_.get(i)); + } + if (getPositiveDeltasList().size() > 0) { + output.writeUInt32NoTag(98); + output.writeUInt32NoTag(positiveDeltasMemoizedSerializedSize); + } + for (int i = 0; i < positiveDeltas_.size(); i++) { + output.writeSInt64NoTag(positiveDeltas_.getLong(i)); + } + if (getPositiveCountsList().size() > 0) { + output.writeUInt32NoTag(106); + output.writeUInt32NoTag(positiveCountsMemoizedSerializedSize); + } + for (int i = 0; i < positiveCounts_.size(); i++) { + output.writeDoubleNoTag(positiveCounts_.getDouble(i)); + } + if (resetHint_ != Types.Histogram.ResetHint.UNKNOWN.getNumber()) { + output.writeEnum(14, resetHint_); + } + if (timestamp_ != 0L) { + output.writeInt64(15, timestamp_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + if (countCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeUInt64Size( + 1, (long) ((Long) count_)); + } + if (countCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeDoubleSize( + 2, (double) ((Double) count_)); + } + if (Double.doubleToRawLongBits(sum_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(3, sum_); + } + if (schema_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeSInt32Size(4, schema_); + } + if (Double.doubleToRawLongBits(zeroThreshold_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(5, zeroThreshold_); + } + if (zeroCountCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeUInt64Size( + 6, (long) ((Long) zeroCount_)); + } + if (zeroCountCase_ == 7) { + size += + 
com.google.protobuf.CodedOutputStream.computeDoubleSize( + 7, (double) ((Double) zeroCount_)); + } + for (int i = 0; i < negativeSpans_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, negativeSpans_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < negativeDeltas_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeSInt64SizeNoTag( + negativeDeltas_.getLong(i)); + } + size += dataSize; + if (!getNegativeDeltasList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + negativeDeltasMemoizedSerializedSize = dataSize; + } + { + int dataSize = 0; + dataSize = 8 * getNegativeCountsList().size(); + size += dataSize; + if (!getNegativeCountsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + negativeCountsMemoizedSerializedSize = dataSize; + } + for (int i = 0; i < positiveSpans_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 11, positiveSpans_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < positiveDeltas_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeSInt64SizeNoTag( + positiveDeltas_.getLong(i)); + } + size += dataSize; + if (!getPositiveDeltasList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + positiveDeltasMemoizedSerializedSize = dataSize; + } + { + int dataSize = 0; + dataSize = 8 * getPositiveCountsList().size(); + size += dataSize; + if (!getPositiveCountsList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + positiveCountsMemoizedSerializedSize = dataSize; + } + if (resetHint_ != Types.Histogram.ResetHint.UNKNOWN.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(14, resetHint_); + } + if (timestamp_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(15, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.Histogram)) { + return super.equals(obj); + } + Types.Histogram other = (Types.Histogram) obj; + + if (Double.doubleToLongBits(getSum()) != Double.doubleToLongBits(other.getSum())) { + return false; + } + if (getSchema() != other.getSchema()) { + return false; + } + if (Double.doubleToLongBits(getZeroThreshold()) + != Double.doubleToLongBits(other.getZeroThreshold())) { + return false; + } + if (!getNegativeSpansList().equals(other.getNegativeSpansList())) { + return false; + } + if (!getNegativeDeltasList().equals(other.getNegativeDeltasList())) { + return false; + } + if (!getNegativeCountsList().equals(other.getNegativeCountsList())) { + return false; + } + if (!getPositiveSpansList().equals(other.getPositiveSpansList())) { + return false; + } + if (!getPositiveDeltasList().equals(other.getPositiveDeltasList())) { + return false; + } + if (!getPositiveCountsList().equals(other.getPositiveCountsList())) { + return false; + } + if (resetHint_ != other.resetHint_) { + return false; + } + if (getTimestamp() != other.getTimestamp()) { + return false; + } + if (!getCountCase().equals(other.getCountCase())) { + return false; + } + switch (countCase_) { + case 1: + if (getCountInt() != other.getCountInt()) { + return false; + } + break; + case 2: + 
if (Double.doubleToLongBits(getCountFloat()) + != Double.doubleToLongBits(other.getCountFloat())) { + return false; + } + break; + case 0: + default: + } + if (!getZeroCountCase().equals(other.getZeroCountCase())) { + return false; + } + switch (zeroCountCase_) { + case 6: + if (getZeroCountInt() != other.getZeroCountInt()) { + return false; + } + break; + case 7: + if (Double.doubleToLongBits(getZeroCountFloat()) + != Double.doubleToLongBits(other.getZeroCountFloat())) { + return false; + } + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SUM_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + Double.doubleToLongBits(getSum())); + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema(); + hash = (37 * hash) + ZERO_THRESHOLD_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + Double.doubleToLongBits(getZeroThreshold())); + if (getNegativeSpansCount() > 0) { + hash = (37 * hash) + NEGATIVE_SPANS_FIELD_NUMBER; + hash = (53 * hash) + getNegativeSpansList().hashCode(); + } + if (getNegativeDeltasCount() > 0) { + hash = (37 * hash) + NEGATIVE_DELTAS_FIELD_NUMBER; + hash = (53 * hash) + getNegativeDeltasList().hashCode(); + } + if (getNegativeCountsCount() > 0) { + hash = (37 * hash) + NEGATIVE_COUNTS_FIELD_NUMBER; + hash = (53 * hash) + getNegativeCountsList().hashCode(); + } + if (getPositiveSpansCount() > 0) { + hash = (37 * hash) + POSITIVE_SPANS_FIELD_NUMBER; + hash = (53 * hash) + getPositiveSpansList().hashCode(); + } + if (getPositiveDeltasCount() > 0) { + hash = (37 * hash) + POSITIVE_DELTAS_FIELD_NUMBER; + hash = (53 * hash) + getPositiveDeltasList().hashCode(); + } + if (getPositiveCountsCount() > 0) { + hash = (37 * hash) + POSITIVE_COUNTS_FIELD_NUMBER; + hash = (53 * hash) + getPositiveCountsList().hashCode(); + } + hash = (37 * hash) + RESET_HINT_FIELD_NUMBER; + hash = (53 * hash) + resetHint_; + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTimestamp()); + switch (countCase_) { + case 1: + hash = (37 * hash) + COUNT_INT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getCountInt()); + break; + case 2: + hash = (37 * hash) + COUNT_FLOAT_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + Double.doubleToLongBits(getCountFloat())); + break; + case 0: + default: + } + switch (zeroCountCase_) { + case 6: + hash = (37 * hash) + ZERO_COUNT_INT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getZeroCountInt()); + break; + case 7: + hash = (37 * hash) + ZERO_COUNT_FLOAT_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + Double.doubleToLongBits(getZeroCountFloat())); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.Histogram parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Histogram parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Histogram parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Histogram parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Histogram parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Histogram parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Histogram parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Histogram parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Histogram parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.Histogram parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Histogram parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Histogram parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.Histogram prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * A native histogram, also known as a sparse histogram.
+         * Original design doc:
+         * https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+         * The appendix of this design doc also explains the concept of float
+         * histograms. This Histogram message can represent both the usual
+         * integer histogram and a float histogram.
+         * 
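For orientation, a hedged sketch of how a caller might assemble a minimal integer native histogram with this generated builder. All Histogram setters below appear in this class; addPositiveDeltas mirrors the addNegativeDeltas method shown further down, and the BucketSpan offset/length setters are assumed from the Prometheus proto definition rather than from this hunk:

    Types.Histogram histogram =
            Types.Histogram.newBuilder()
                    .setCountInt(7)        // 1 zero-bucket + 4 + 2 observations
                    .setSum(12.5)
                    .setSchema(3)          // base-2 buckets, factor 2^(2^-3)
                    .setZeroThreshold(1e-128)
                    .setZeroCountInt(1)
                    .addPositiveSpans(
                            Types.BucketSpan.newBuilder().setOffset(1).setLength(2))
                    .addPositiveDeltas(4)
                    .addPositiveDeltas(-2) // absolute counts 4, then 2
                    .setTimestamp(System.currentTimeMillis())
                    .build();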
+ * + *
Protobuf type {@code prometheus.Histogram} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.Histogram) + Types.HistogramOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Histogram_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Histogram_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Histogram.class, Types.Histogram.Builder.class); + } + + // Construct using Types.Histogram.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + sum_ = 0D; + schema_ = 0; + zeroThreshold_ = 0D; + if (negativeSpansBuilder_ == null) { + negativeSpans_ = java.util.Collections.emptyList(); + } else { + negativeSpans_ = null; + negativeSpansBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); + negativeDeltas_ = emptyLongList(); + negativeCounts_ = emptyDoubleList(); + if (positiveSpansBuilder_ == null) { + positiveSpans_ = java.util.Collections.emptyList(); + } else { + positiveSpans_ = null; + positiveSpansBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); + positiveDeltas_ = emptyLongList(); + positiveCounts_ = emptyDoubleList(); + resetHint_ = 0; + timestamp_ = 0L; + countCase_ = 0; + count_ = null; + zeroCountCase_ = 0; + zeroCount_ = null; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_Histogram_descriptor; + } + + @Override + public Types.Histogram getDefaultInstanceForType() { + return Types.Histogram.getDefaultInstance(); + } + + @Override + public Types.Histogram build() { + Types.Histogram result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.Histogram buildPartial() { + Types.Histogram result = new Types.Histogram(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Types.Histogram result) { + if (negativeSpansBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0)) { + negativeSpans_ = java.util.Collections.unmodifiableList(negativeSpans_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.negativeSpans_ = negativeSpans_; + } else { + result.negativeSpans_ = negativeSpansBuilder_.build(); + } + if (positiveSpansBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0)) { + positiveSpans_ = java.util.Collections.unmodifiableList(positiveSpans_); + bitField0_ = (bitField0_ & ~0x00000400); + } + result.positiveSpans_ = positiveSpans_; + } else { + result.positiveSpans_ = positiveSpansBuilder_.build(); + } + } + + private void buildPartial0(Types.Histogram result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.sum_ = sum_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.schema_ = schema_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.zeroThreshold_ = zeroThreshold_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + negativeDeltas_.makeImmutable(); + result.negativeDeltas_ = negativeDeltas_; + } + if 
(((from_bitField0_ & 0x00000200) != 0)) { + negativeCounts_.makeImmutable(); + result.negativeCounts_ = negativeCounts_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + positiveDeltas_.makeImmutable(); + result.positiveDeltas_ = positiveDeltas_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + positiveCounts_.makeImmutable(); + result.positiveCounts_ = positiveCounts_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.resetHint_ = resetHint_; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.timestamp_ = timestamp_; + } + } + + private void buildPartialOneofs(Types.Histogram result) { + result.countCase_ = countCase_; + result.count_ = this.count_; + result.zeroCountCase_ = zeroCountCase_; + result.zeroCount_ = this.zeroCount_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Types.Histogram) { + return mergeFrom((Types.Histogram) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.Histogram other) { + if (other == Types.Histogram.getDefaultInstance()) { + return this; + } + if (other.getSum() != 0D) { + setSum(other.getSum()); + } + if (other.getSchema() != 0) { + setSchema(other.getSchema()); + } + if (other.getZeroThreshold() != 0D) { + setZeroThreshold(other.getZeroThreshold()); + } + if (negativeSpansBuilder_ == null) { + if (!other.negativeSpans_.isEmpty()) { + if (negativeSpans_.isEmpty()) { + negativeSpans_ = other.negativeSpans_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureNegativeSpansIsMutable(); + negativeSpans_.addAll(other.negativeSpans_); + } + onChanged(); + } + } else { + if (!other.negativeSpans_.isEmpty()) { + if (negativeSpansBuilder_.isEmpty()) { + negativeSpansBuilder_.dispose(); + negativeSpansBuilder_ = null; + negativeSpans_ = other.negativeSpans_; + bitField0_ = (bitField0_ & ~0x00000080); + negativeSpansBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getNegativeSpansFieldBuilder() + : null; + } else { + negativeSpansBuilder_.addAllMessages(other.negativeSpans_); + } + } + } + if (!other.negativeDeltas_.isEmpty()) { + if (negativeDeltas_.isEmpty()) { + negativeDeltas_ = other.negativeDeltas_; + negativeDeltas_.makeImmutable(); + bitField0_ |= 0x00000100; + } else { + ensureNegativeDeltasIsMutable(); + negativeDeltas_.addAll(other.negativeDeltas_); + } + onChanged(); + } + if (!other.negativeCounts_.isEmpty()) { + if (negativeCounts_.isEmpty()) { + negativeCounts_ = other.negativeCounts_; + negativeCounts_.makeImmutable(); + bitField0_ |= 0x00000200; + } else { + ensureNegativeCountsIsMutable(); + negativeCounts_.addAll(other.negativeCounts_); + } + onChanged(); + } + if (positiveSpansBuilder_ == null) { + if (!other.positiveSpans_.isEmpty()) { + if (positiveSpans_.isEmpty()) { + positiveSpans_ = other.positiveSpans_; + bitField0_ = (bitField0_ & ~0x00000400); + } else { + ensurePositiveSpansIsMutable(); + positiveSpans_.addAll(other.positiveSpans_); + } + onChanged(); + } + } else { + if (!other.positiveSpans_.isEmpty()) { + if (positiveSpansBuilder_.isEmpty()) { + positiveSpansBuilder_.dispose(); + positiveSpansBuilder_ = null; + positiveSpans_ = other.positiveSpans_; + bitField0_ = (bitField0_ & ~0x00000400); + positiveSpansBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getPositiveSpansFieldBuilder() + : null; + } else { + positiveSpansBuilder_.addAllMessages(other.positiveSpans_); + } + } + } + if (!other.positiveDeltas_.isEmpty()) { + if (positiveDeltas_.isEmpty()) { + positiveDeltas_ = other.positiveDeltas_; + positiveDeltas_.makeImmutable(); + bitField0_ |= 0x00000800; + } else { + ensurePositiveDeltasIsMutable(); + positiveDeltas_.addAll(other.positiveDeltas_); + } + onChanged(); + } + if (!other.positiveCounts_.isEmpty()) { + if (positiveCounts_.isEmpty()) { + positiveCounts_ = other.positiveCounts_; + positiveCounts_.makeImmutable(); + bitField0_ |= 0x00001000; + } else { + ensurePositiveCountsIsMutable(); + positiveCounts_.addAll(other.positiveCounts_); + } + onChanged(); + } + if (other.resetHint_ != 0) { + setResetHintValue(other.getResetHintValue()); + } + if (other.getTimestamp() != 0L) { + setTimestamp(other.getTimestamp()); + } + switch (other.getCountCase()) { + case COUNT_INT: + { + setCountInt(other.getCountInt()); + break; + } + case COUNT_FLOAT: + { + setCountFloat(other.getCountFloat()); + break; + } + case COUNT_NOT_SET: + { + break; + } + } + switch (other.getZeroCountCase()) { + case ZERO_COUNT_INT: + { + setZeroCountInt(other.getZeroCountInt()); + break; + } + case ZERO_COUNT_FLOAT: + { + setZeroCountFloat(other.getZeroCountFloat()); + break; + } + case ZEROCOUNT_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + count_ = input.readUInt64(); + countCase_ = 1; + break; + } // case 8 + case 17: + { + count_ = input.readDouble(); + countCase_ = 2; + break; + } // case 17 + case 25: + { + sum_ = input.readDouble(); + bitField0_ |= 0x00000004; + break; 
+ } // case 25 + case 32: + { + schema_ = input.readSInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 41: + { + zeroThreshold_ = input.readDouble(); + bitField0_ |= 0x00000010; + break; + } // case 41 + case 48: + { + zeroCount_ = input.readUInt64(); + zeroCountCase_ = 6; + break; + } // case 48 + case 57: + { + zeroCount_ = input.readDouble(); + zeroCountCase_ = 7; + break; + } // case 57 + case 66: + { + Types.BucketSpan m = + input.readMessage( + Types.BucketSpan.parser(), extensionRegistry); + if (negativeSpansBuilder_ == null) { + ensureNegativeSpansIsMutable(); + negativeSpans_.add(m); + } else { + negativeSpansBuilder_.addMessage(m); + } + break; + } // case 66 + case 72: + { + long v = input.readSInt64(); + ensureNegativeDeltasIsMutable(); + negativeDeltas_.addLong(v); + break; + } // case 72 + case 74: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + ensureNegativeDeltasIsMutable(); + while (input.getBytesUntilLimit() > 0) { + negativeDeltas_.addLong(input.readSInt64()); + } + input.popLimit(limit); + break; + } // case 74 + case 81: + { + double v = input.readDouble(); + ensureNegativeCountsIsMutable(); + negativeCounts_.addDouble(v); + break; + } // case 81 + case 82: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + int alloc = length > 4096 ? 4096 : length; + ensureNegativeCountsIsMutable(alloc / 8); + while (input.getBytesUntilLimit() > 0) { + negativeCounts_.addDouble(input.readDouble()); + } + input.popLimit(limit); + break; + } // case 82 + case 90: + { + Types.BucketSpan m = + input.readMessage( + Types.BucketSpan.parser(), extensionRegistry); + if (positiveSpansBuilder_ == null) { + ensurePositiveSpansIsMutable(); + positiveSpans_.add(m); + } else { + positiveSpansBuilder_.addMessage(m); + } + break; + } // case 90 + case 96: + { + long v = input.readSInt64(); + ensurePositiveDeltasIsMutable(); + positiveDeltas_.addLong(v); + break; + } // case 96 + case 98: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + ensurePositiveDeltasIsMutable(); + while (input.getBytesUntilLimit() > 0) { + positiveDeltas_.addLong(input.readSInt64()); + } + input.popLimit(limit); + break; + } // case 98 + case 105: + { + double v = input.readDouble(); + ensurePositiveCountsIsMutable(); + positiveCounts_.addDouble(v); + break; + } // case 105 + case 106: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + int alloc = length > 4096 ? 
4096 : length; + ensurePositiveCountsIsMutable(alloc / 8); + while (input.getBytesUntilLimit() > 0) { + positiveCounts_.addDouble(input.readDouble()); + } + input.popLimit(limit); + break; + } // case 106 + case 112: + { + resetHint_ = input.readEnum(); + bitField0_ |= 0x00002000; + break; + } // case 112 + case 120: + { + timestamp_ = input.readInt64(); + bitField0_ |= 0x00004000; + break; + } // case 120 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int countCase_ = 0; + private Object count_; + + public CountCase getCountCase() { + return CountCase.forNumber(countCase_); + } + + public Builder clearCount() { + countCase_ = 0; + count_ = null; + onChanged(); + return this; + } + + private int zeroCountCase_ = 0; + private Object zeroCount_; + + public ZeroCountCase getZeroCountCase() { + return ZeroCountCase.forNumber(zeroCountCase_); + } + + public Builder clearZeroCount() { + zeroCountCase_ = 0; + zeroCount_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * uint64 count_int = 1; + * + * @return Whether the countInt field is set. + */ + public boolean hasCountInt() { + return countCase_ == 1; + } + + /** + * uint64 count_int = 1; + * + * @return The countInt. + */ + public long getCountInt() { + if (countCase_ == 1) { + return (Long) count_; + } + return 0L; + } + + /** + * uint64 count_int = 1; + * + * @param value The countInt to set. + * @return This builder for chaining. + */ + public Builder setCountInt(long value) { + + countCase_ = 1; + count_ = value; + onChanged(); + return this; + } + + /** + * uint64 count_int = 1; + * + * @return This builder for chaining. + */ + public Builder clearCountInt() { + if (countCase_ == 1) { + countCase_ = 0; + count_ = null; + onChanged(); + } + return this; + } + + /** + * double count_float = 2; + * + * @return Whether the countFloat field is set. + */ + public boolean hasCountFloat() { + return countCase_ == 2; + } + + /** + * double count_float = 2; + * + * @return The countFloat. + */ + public double getCountFloat() { + if (countCase_ == 2) { + return (Double) count_; + } + return 0D; + } + + /** + * double count_float = 2; + * + * @param value The countFloat to set. + * @return This builder for chaining. + */ + public Builder setCountFloat(double value) { + + countCase_ = 2; + count_ = value; + onChanged(); + return this; + } + + /** + * double count_float = 2; + * + * @return This builder for chaining. + */ + public Builder clearCountFloat() { + if (countCase_ == 2) { + countCase_ = 0; + count_ = null; + onChanged(); + } + return this; + } + + private double sum_; + + /** + * + * + *
+             * Sum of observations in the histogram.
+             * 
+ * + * double sum = 3; + * + * @return The sum. + */ + @Override + public double getSum() { + return sum_; + } + + /** + * + * + *
+             * Sum of observations in the histogram.
+             * 
+ * + * double sum = 3; + * + * @param value The sum to set. + * @return This builder for chaining. + */ + public Builder setSum(double value) { + + sum_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+             * Sum of observations in the histogram.
+             * 
+ * + * double sum = 3; + * + * @return This builder for chaining. + */ + public Builder clearSum() { + bitField0_ = (bitField0_ & ~0x00000004); + sum_ = 0D; + onChanged(); + return this; + } + + private int schema_; + + /** + * + * + *
+             * The schema defines the bucket schema. Currently, valid numbers
+             * are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+             * is a bucket boundary in each case, and then each power of two is
+             * divided into 2^n logarithmic buckets. Or in other words, each
+             * bucket boundary is the previous boundary times 2^(2^-n). In the
+             * future, more bucket schemas may be added using numbers < -4 or >
+             * 8.
+             * 
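Concretely, the boundary ratio this comment describes is 2^(2^-n) for schema n, i.e. each power of two is split into 2^n logarithmic buckets. A quick sketch of the arithmetic (hypothetical helper, not part of this patch):

    // For schema n the bucket boundary grows by a factor of 2^(2^-n);
    // for n = 3 that is 2^(1/8), roughly 1.0905.
    static double bucketGrowthFactor(int schema) {
        return Math.pow(2.0, Math.pow(2.0, -schema));
    }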
+ * + * sint32 schema = 4; + * + * @return The schema. + */ + @Override + public int getSchema() { + return schema_; + } + + /** + * + * + *
+             * The schema defines the bucket schema. Currently, valid numbers
+             * are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+             * is a bucket boundary in each case, and then each power of two is
+             * divided into 2^n logarithmic buckets. Or in other words, each
+             * bucket boundary is the previous boundary times 2^(2^-n). In the
+             * future, more bucket schemas may be added using numbers < -4 or >
+             * 8.
+             * 
+ * + * sint32 schema = 4; + * + * @param value The schema to set. + * @return This builder for chaining. + */ + public Builder setSchema(int value) { + + schema_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+             * The schema defines the bucket schema. Currently, valid numbers
+             * are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+             * is a bucket boundary in each case, and then each power of two is
+             * divided into 2^n logarithmic buckets. Or in other words, each
+             * bucket boundary is the previous boundary times 2^(2^-n). In the
+             * future, more bucket schemas may be added using numbers < -4 or >
+             * 8.
+             * 
+ * + * sint32 schema = 4; + * + * @return This builder for chaining. + */ + public Builder clearSchema() { + bitField0_ = (bitField0_ & ~0x00000008); + schema_ = 0; + onChanged(); + return this; + } + + private double zeroThreshold_; + + /** + * + * + *
+             * Breadth of the zero bucket.
+             * 
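In the native histogram model, the zero bucket collects observations whose absolute value does not exceed this threshold; a small illustrative check (hypothetical helper, not part of this patch):

    // An observation v is counted in the zero bucket (zero_count_int /
    // zero_count_float) when |v| <= zero_threshold.
    static boolean inZeroBucket(double v, double zeroThreshold) {
        return Math.abs(v) <= zeroThreshold;
    }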
+ * + * double zero_threshold = 5; + * + * @return The zeroThreshold. + */ + @Override + public double getZeroThreshold() { + return zeroThreshold_; + } + + /** + * + * + *
+             * Breadth of the zero bucket.
+             * 
+ * + * double zero_threshold = 5; + * + * @param value The zeroThreshold to set. + * @return This builder for chaining. + */ + public Builder setZeroThreshold(double value) { + + zeroThreshold_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+             * Breadth of the zero bucket.
+             * 
+ * + * double zero_threshold = 5; + * + * @return This builder for chaining. + */ + public Builder clearZeroThreshold() { + bitField0_ = (bitField0_ & ~0x00000010); + zeroThreshold_ = 0D; + onChanged(); + return this; + } + + /** + * uint64 zero_count_int = 6; + * + * @return Whether the zeroCountInt field is set. + */ + public boolean hasZeroCountInt() { + return zeroCountCase_ == 6; + } + + /** + * uint64 zero_count_int = 6; + * + * @return The zeroCountInt. + */ + public long getZeroCountInt() { + if (zeroCountCase_ == 6) { + return (Long) zeroCount_; + } + return 0L; + } + + /** + * uint64 zero_count_int = 6; + * + * @param value The zeroCountInt to set. + * @return This builder for chaining. + */ + public Builder setZeroCountInt(long value) { + + zeroCountCase_ = 6; + zeroCount_ = value; + onChanged(); + return this; + } + + /** + * uint64 zero_count_int = 6; + * + * @return This builder for chaining. + */ + public Builder clearZeroCountInt() { + if (zeroCountCase_ == 6) { + zeroCountCase_ = 0; + zeroCount_ = null; + onChanged(); + } + return this; + } + + /** + * double zero_count_float = 7; + * + * @return Whether the zeroCountFloat field is set. + */ + public boolean hasZeroCountFloat() { + return zeroCountCase_ == 7; + } + + /** + * double zero_count_float = 7; + * + * @return The zeroCountFloat. + */ + public double getZeroCountFloat() { + if (zeroCountCase_ == 7) { + return (Double) zeroCount_; + } + return 0D; + } + + /** + * double zero_count_float = 7; + * + * @param value The zeroCountFloat to set. + * @return This builder for chaining. + */ + public Builder setZeroCountFloat(double value) { + + zeroCountCase_ = 7; + zeroCount_ = value; + onChanged(); + return this; + } + + /** + * double zero_count_float = 7; + * + * @return This builder for chaining. + */ + public Builder clearZeroCountFloat() { + if (zeroCountCase_ == 7) { + zeroCountCase_ = 0; + zeroCount_ = null; + onChanged(); + } + return this; + } + + private java.util.List negativeSpans_ = + java.util.Collections.emptyList(); + + private void ensureNegativeSpansIsMutable() { + if (!((bitField0_ & 0x00000080) != 0)) { + negativeSpans_ = new java.util.ArrayList(negativeSpans_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.BucketSpan, Types.BucketSpan.Builder, Types.BucketSpanOrBuilder> + negativeSpansBuilder_; + + /** + * + * + *
+             * Negative Buckets.
+             * 
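The span encoding is worth spelling out: per the Prometheus proto, each BucketSpan contributes "length" consecutive buckets, and its "offset" is the gap from the end of the previous span (or the starting index for the first span). A hedged sketch expanding spans into absolute bucket indexes, with illustrative helper names:

    // Spans {offset=1,length=2} then {offset=3,length=1} describe buckets at
    // indexes 1, 2 and 6: start at 1, take 2, skip 3, take 1.
    static java.util.List<Integer> spanIndexes(int[][] spans) { // {offset, length}
        java.util.List<Integer> indexes = new java.util.ArrayList<>();
        int next = 0;
        for (int[] span : spans) {
            next += span[0]; // gap to the previous span's end
            for (int i = 0; i < span[1]; i++) {
                indexes.add(next++);
            }
        }
        return indexes;
    }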
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getNegativeSpansList() { + if (negativeSpansBuilder_ == null) { + return java.util.Collections.unmodifiableList(negativeSpans_); + } else { + return negativeSpansBuilder_.getMessageList(); + } + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public int getNegativeSpansCount() { + if (negativeSpansBuilder_ == null) { + return negativeSpans_.size(); + } else { + return negativeSpansBuilder_.getCount(); + } + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan getNegativeSpans(int index) { + if (negativeSpansBuilder_ == null) { + return negativeSpans_.get(index); + } else { + return negativeSpansBuilder_.getMessage(index); + } + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder setNegativeSpans(int index, Types.BucketSpan value) { + if (negativeSpansBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNegativeSpansIsMutable(); + negativeSpans_.set(index, value); + onChanged(); + } else { + negativeSpansBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder setNegativeSpans(int index, Types.BucketSpan.Builder builderForValue) { + if (negativeSpansBuilder_ == null) { + ensureNegativeSpansIsMutable(); + negativeSpans_.set(index, builderForValue.build()); + onChanged(); + } else { + negativeSpansBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder addNegativeSpans(Types.BucketSpan value) { + if (negativeSpansBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNegativeSpansIsMutable(); + negativeSpans_.add(value); + onChanged(); + } else { + negativeSpansBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder addNegativeSpans(int index, Types.BucketSpan value) { + if (negativeSpansBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNegativeSpansIsMutable(); + negativeSpans_.add(index, value); + onChanged(); + } else { + negativeSpansBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder addNegativeSpans(Types.BucketSpan.Builder builderForValue) { + if (negativeSpansBuilder_ == null) { + ensureNegativeSpansIsMutable(); + negativeSpans_.add(builderForValue.build()); + onChanged(); + } else { + negativeSpansBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder addNegativeSpans(int index, Types.BucketSpan.Builder builderForValue) { + if (negativeSpansBuilder_ == null) { + ensureNegativeSpansIsMutable(); + negativeSpans_.add(index, builderForValue.build()); + onChanged(); + } else { + negativeSpansBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder addAllNegativeSpans(Iterable values) { + if (negativeSpansBuilder_ == null) { + ensureNegativeSpansIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, negativeSpans_); + onChanged(); + } else { + negativeSpansBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder clearNegativeSpans() { + if (negativeSpansBuilder_ == null) { + negativeSpans_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + negativeSpansBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Builder removeNegativeSpans(int index) { + if (negativeSpansBuilder_ == null) { + ensureNegativeSpansIsMutable(); + negativeSpans_.remove(index); + onChanged(); + } else { + negativeSpansBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan.Builder getNegativeSpansBuilder(int index) { + return getNegativeSpansFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpanOrBuilder getNegativeSpansOrBuilder(int index) { + if (negativeSpansBuilder_ == null) { + return negativeSpans_.get(index); + } else { + return negativeSpansBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List + getNegativeSpansOrBuilderList() { + if (negativeSpansBuilder_ != null) { + return negativeSpansBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(negativeSpans_); + } + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan.Builder addNegativeSpansBuilder() { + return getNegativeSpansFieldBuilder() + .addBuilder(Types.BucketSpan.getDefaultInstance()); + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan.Builder addNegativeSpansBuilder(int index) { + return getNegativeSpansFieldBuilder() + .addBuilder(index, Types.BucketSpan.getDefaultInstance()); + } + + /** + * + * + *
+             * Negative Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan negative_spans = 8 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getNegativeSpansBuilderList() { + return getNegativeSpansFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.BucketSpan, Types.BucketSpan.Builder, Types.BucketSpanOrBuilder> + getNegativeSpansFieldBuilder() { + if (negativeSpansBuilder_ == null) { + negativeSpansBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.BucketSpan, + Types.BucketSpan.Builder, + Types.BucketSpanOrBuilder>( + negativeSpans_, + ((bitField0_ & 0x00000080) != 0), + getParentForChildren(), + isClean()); + negativeSpans_ = null; + } + return negativeSpansBuilder_; + } + + private com.google.protobuf.Internal.LongList negativeDeltas_ = emptyLongList(); + + private void ensureNegativeDeltasIsMutable() { + if (!negativeDeltas_.isModifiable()) { + negativeDeltas_ = makeMutableCopy(negativeDeltas_); + } + bitField0_ |= 0x00000100; + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @return A list containing the negativeDeltas. + */ + public java.util.List getNegativeDeltasList() { + negativeDeltas_.makeImmutable(); + return negativeDeltas_; + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @return The count of negativeDeltas. + */ + public int getNegativeDeltasCount() { + return negativeDeltas_.size(); + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @param index The index of the element to return. + * @return The negativeDeltas at the given index. + */ + public long getNegativeDeltas(int index) { + return negativeDeltas_.getLong(index); + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @param index The index to set the value at. + * @param value The negativeDeltas to set. + * @return This builder for chaining. + */ + public Builder setNegativeDeltas(int index, long value) { + + ensureNegativeDeltasIsMutable(); + negativeDeltas_.setLong(index, value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @param value The negativeDeltas to add. + * @return This builder for chaining. + */ + public Builder addNegativeDeltas(long value) { + + ensureNegativeDeltasIsMutable(); + negativeDeltas_.addLong(value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @param values The negativeDeltas to add. + * @return This builder for chaining. + */ + public Builder addAllNegativeDeltas(Iterable values) { + ensureNegativeDeltasIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, negativeDeltas_); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
+             * Use either "negative_deltas" or "negative_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 negative_deltas = 9; + * + * @return This builder for chaining. + */ + public Builder clearNegativeDeltas() { + negativeDeltas_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + private com.google.protobuf.Internal.DoubleList negativeCounts_ = emptyDoubleList(); + + private void ensureNegativeCountsIsMutable() { + if (!negativeCounts_.isModifiable()) { + negativeCounts_ = makeMutableCopy(negativeCounts_); + } + bitField0_ |= 0x00000200; + } + + private void ensureNegativeCountsIsMutable(int capacity) { + if (!negativeCounts_.isModifiable()) { + negativeCounts_ = makeMutableCopy(negativeCounts_, capacity); + } + bitField0_ |= 0x00000200; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @return A list containing the negativeCounts. + */ + public java.util.List getNegativeCountsList() { + negativeCounts_.makeImmutable(); + return negativeCounts_; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @return The count of negativeCounts. + */ + public int getNegativeCountsCount() { + return negativeCounts_.size(); + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @param index The index of the element to return. + * @return The negativeCounts at the given index. + */ + public double getNegativeCounts(int index) { + return negativeCounts_.getDouble(index); + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @param index The index to set the value at. + * @param value The negativeCounts to set. + * @return This builder for chaining. + */ + public Builder setNegativeCounts(int index, double value) { + + ensureNegativeCountsIsMutable(); + negativeCounts_.setDouble(index, value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @param value The negativeCounts to add. + * @return This builder for chaining. + */ + public Builder addNegativeCounts(double value) { + + ensureNegativeCountsIsMutable(); + negativeCounts_.addDouble(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @param values The negativeCounts to add. + * @return This builder for chaining. + */ + public Builder addAllNegativeCounts(Iterable values) { + ensureNegativeCountsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, negativeCounts_); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double negative_counts = 10; + * + * @return This builder for chaining. + */ + public Builder clearNegativeCounts() { + negativeCounts_ = emptyDoubleList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + return this; + } + + private java.util.List positiveSpans_ = + java.util.Collections.emptyList(); + + private void ensurePositiveSpansIsMutable() { + if (!((bitField0_ & 0x00000400) != 0)) { + positiveSpans_ = new java.util.ArrayList(positiveSpans_); + bitField0_ |= 0x00000400; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.BucketSpan, Types.BucketSpan.Builder, Types.BucketSpanOrBuilder> + positiveSpansBuilder_; + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getPositiveSpansList() { + if (positiveSpansBuilder_ == null) { + return java.util.Collections.unmodifiableList(positiveSpans_); + } else { + return positiveSpansBuilder_.getMessageList(); + } + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public int getPositiveSpansCount() { + if (positiveSpansBuilder_ == null) { + return positiveSpans_.size(); + } else { + return positiveSpansBuilder_.getCount(); + } + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan getPositiveSpans(int index) { + if (positiveSpansBuilder_ == null) { + return positiveSpans_.get(index); + } else { + return positiveSpansBuilder_.getMessage(index); + } + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder setPositiveSpans(int index, Types.BucketSpan value) { + if (positiveSpansBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePositiveSpansIsMutable(); + positiveSpans_.set(index, value); + onChanged(); + } else { + positiveSpansBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder setPositiveSpans(int index, Types.BucketSpan.Builder builderForValue) { + if (positiveSpansBuilder_ == null) { + ensurePositiveSpansIsMutable(); + positiveSpans_.set(index, builderForValue.build()); + onChanged(); + } else { + positiveSpansBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder addPositiveSpans(Types.BucketSpan value) { + if (positiveSpansBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePositiveSpansIsMutable(); + positiveSpans_.add(value); + onChanged(); + } else { + positiveSpansBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder addPositiveSpans(int index, Types.BucketSpan value) { + if (positiveSpansBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePositiveSpansIsMutable(); + positiveSpans_.add(index, value); + onChanged(); + } else { + positiveSpansBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder addPositiveSpans(Types.BucketSpan.Builder builderForValue) { + if (positiveSpansBuilder_ == null) { + ensurePositiveSpansIsMutable(); + positiveSpans_.add(builderForValue.build()); + onChanged(); + } else { + positiveSpansBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder addPositiveSpans(int index, Types.BucketSpan.Builder builderForValue) { + if (positiveSpansBuilder_ == null) { + ensurePositiveSpansIsMutable(); + positiveSpans_.add(index, builderForValue.build()); + onChanged(); + } else { + positiveSpansBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder addAllPositiveSpans(Iterable values) { + if (positiveSpansBuilder_ == null) { + ensurePositiveSpansIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, positiveSpans_); + onChanged(); + } else { + positiveSpansBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder clearPositiveSpans() { + if (positiveSpansBuilder_ == null) { + positiveSpans_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + onChanged(); + } else { + positiveSpansBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Builder removePositiveSpans(int index) { + if (positiveSpansBuilder_ == null) { + ensurePositiveSpansIsMutable(); + positiveSpans_.remove(index); + onChanged(); + } else { + positiveSpansBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan.Builder getPositiveSpansBuilder(int index) { + return getPositiveSpansFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpanOrBuilder getPositiveSpansOrBuilder(int index) { + if (positiveSpansBuilder_ == null) { + return positiveSpans_.get(index); + } else { + return positiveSpansBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List + getPositiveSpansOrBuilderList() { + if (positiveSpansBuilder_ != null) { + return positiveSpansBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(positiveSpans_); + } + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan.Builder addPositiveSpansBuilder() { + return getPositiveSpansFieldBuilder() + .addBuilder(Types.BucketSpan.getDefaultInstance()); + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public Types.BucketSpan.Builder addPositiveSpansBuilder(int index) { + return getPositiveSpansFieldBuilder() + .addBuilder(index, Types.BucketSpan.getDefaultInstance()); + } + + /** + * + * + *
+             * Positive Buckets.
+             * 
+ * + * + * repeated .prometheus.BucketSpan positive_spans = 11 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getPositiveSpansBuilderList() { + return getPositiveSpansFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.BucketSpan, Types.BucketSpan.Builder, Types.BucketSpanOrBuilder> + getPositiveSpansFieldBuilder() { + if (positiveSpansBuilder_ == null) { + positiveSpansBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.BucketSpan, + Types.BucketSpan.Builder, + Types.BucketSpanOrBuilder>( + positiveSpans_, + ((bitField0_ & 0x00000400) != 0), + getParentForChildren(), + isClean()); + positiveSpans_ = null; + } + return positiveSpansBuilder_; + } + + private com.google.protobuf.Internal.LongList positiveDeltas_ = emptyLongList(); + + private void ensurePositiveDeltasIsMutable() { + if (!positiveDeltas_.isModifiable()) { + positiveDeltas_ = makeMutableCopy(positiveDeltas_); + } + bitField0_ |= 0x00000800; + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
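To make the two encodings concrete, here is an illustrative sketch (bucket counts invented for the example) of the same three buckets written both ways with the generated builder:

    // Integer histogram: each delta is relative to the previous bucket count.
    Types.Histogram intHist =
            Types.Histogram.newBuilder()
                    .addPositiveDeltas(5) // bucket 0: count 5
                    .addPositiveDeltas(-2) // bucket 1: 5 + (-2) = 3
                    .addPositiveDeltas(5) // bucket 2: 3 + 5 = 8
                    .build();

    // Float histogram: absolute counts, one double per bucket.
    Types.Histogram floatHist =
            Types.Histogram.newBuilder()
                    .addPositiveCounts(5.0)
                    .addPositiveCounts(3.0)
                    .addPositiveCounts(8.0)
                    .build();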
+ * + * repeated sint64 positive_deltas = 12; + * + * @return A list containing the positiveDeltas. + */ + public java.util.List getPositiveDeltasList() { + positiveDeltas_.makeImmutable(); + return positiveDeltas_; + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @return The count of positiveDeltas. + */ + public int getPositiveDeltasCount() { + return positiveDeltas_.size(); + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @param index The index of the element to return. + * @return The positiveDeltas at the given index. + */ + public long getPositiveDeltas(int index) { + return positiveDeltas_.getLong(index); + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @param index The index to set the value at. + * @param value The positiveDeltas to set. + * @return This builder for chaining. + */ + public Builder setPositiveDeltas(int index, long value) { + + ensurePositiveDeltasIsMutable(); + positiveDeltas_.setLong(index, value); + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @param value The positiveDeltas to add. + * @return This builder for chaining. + */ + public Builder addPositiveDeltas(long value) { + + ensurePositiveDeltasIsMutable(); + positiveDeltas_.addLong(value); + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @param values The positiveDeltas to add. + * @return This builder for chaining. + */ + public Builder addAllPositiveDeltas(Iterable values) { + ensurePositiveDeltasIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, positiveDeltas_); + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
+             * Use either "positive_deltas" or "positive_counts", the former for
+             * regular histograms with integer counts, the latter for float
+             * histograms.
+             * 
+ * + * repeated sint64 positive_deltas = 12; + * + * @return This builder for chaining. + */ + public Builder clearPositiveDeltas() { + positiveDeltas_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + return this; + } + + private com.google.protobuf.Internal.DoubleList positiveCounts_ = emptyDoubleList(); + + private void ensurePositiveCountsIsMutable() { + if (!positiveCounts_.isModifiable()) { + positiveCounts_ = makeMutableCopy(positiveCounts_); + } + bitField0_ |= 0x00001000; + } + + private void ensurePositiveCountsIsMutable(int capacity) { + if (!positiveCounts_.isModifiable()) { + positiveCounts_ = makeMutableCopy(positiveCounts_, capacity); + } + bitField0_ |= 0x00001000; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @return A list containing the positiveCounts. + */ + public java.util.List getPositiveCountsList() { + positiveCounts_.makeImmutable(); + return positiveCounts_; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @return The count of positiveCounts. + */ + public int getPositiveCountsCount() { + return positiveCounts_.size(); + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @param index The index of the element to return. + * @return The positiveCounts at the given index. + */ + public double getPositiveCounts(int index) { + return positiveCounts_.getDouble(index); + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @param index The index to set the value at. + * @param value The positiveCounts to set. + * @return This builder for chaining. + */ + public Builder setPositiveCounts(int index, double value) { + + ensurePositiveCountsIsMutable(); + positiveCounts_.setDouble(index, value); + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @param value The positiveCounts to add. + * @return This builder for chaining. + */ + public Builder addPositiveCounts(double value) { + + ensurePositiveCountsIsMutable(); + positiveCounts_.addDouble(value); + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @param values The positiveCounts to add. + * @return This builder for chaining. + */ + public Builder addAllPositiveCounts(Iterable values) { + ensurePositiveCountsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, positiveCounts_); + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
+             * Absolute count of each bucket.
+             * 
+ * + * repeated double positive_counts = 13; + * + * @return This builder for chaining. + */ + public Builder clearPositiveCounts() { + positiveCounts_ = emptyDoubleList(); + bitField0_ = (bitField0_ & ~0x00001000); + onChanged(); + return this; + } + + private int resetHint_ = 0; + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return The enum numeric value on the wire for resetHint. + */ + @Override + public int getResetHintValue() { + return resetHint_; + } + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @param value The enum numeric value on the wire for resetHint to set. + * @return This builder for chaining. + */ + public Builder setResetHintValue(int value) { + resetHint_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return The resetHint. + */ + @Override + public Types.Histogram.ResetHint getResetHint() { + Types.Histogram.ResetHint result = Types.Histogram.ResetHint.forNumber(resetHint_); + return result == null ? Types.Histogram.ResetHint.UNRECOGNIZED : result; + } + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @param value The resetHint to set. + * @return This builder for chaining. + */ + public Builder setResetHint(Types.Histogram.ResetHint value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00002000; + resetHint_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * .prometheus.Histogram.ResetHint reset_hint = 14; + * + * @return This builder for chaining. + */ + public Builder clearResetHint() { + bitField0_ = (bitField0_ & ~0x00002000); + resetHint_ = 0; + onChanged(); + return this; + } + + private long timestamp_; + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
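Since the field is plain Unix epoch milliseconds, a Java caller does not need the Go helper mentioned above; the current wall-clock time can be attached directly (a minimal sketch):

    long nowMs = System.currentTimeMillis(); // Unix epoch milliseconds
    Types.Histogram stamped =
            Types.Histogram.newBuilder()
                    .setTimestamp(nowMs)
                    .build();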
+ * + * int64 timestamp = 15; + * + * @return The timestamp. + */ + @Override + public long getTimestamp() { + return timestamp_; + } + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 15; + * + * @param value The timestamp to set. + * @return This builder for chaining. + */ + public Builder setTimestamp(long value) { + + timestamp_ = value; + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
+             * timestamp is in ms format, see model/timestamp/timestamp.go for
+             * conversion from time.Time to Prometheus timestamp.
+             * 
+ * + * int64 timestamp = 15; + * + * @return This builder for chaining. + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00004000); + timestamp_ = 0L; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.Histogram) + } + + // @@protoc_insertion_point(class_scope:prometheus.Histogram) + private static final Types.Histogram DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.Histogram(); + } + + public static Types.Histogram getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public Histogram parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Types.Histogram getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface BucketSpanOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.BucketSpan) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * Gap to previous span, or starting point for 1st span (which can be negative).
+         * 
+ * + * sint32 offset = 1; + * + * @return The offset. + */ + int getOffset(); + + /** + * + * + *
+         * Length of consecutive buckets.
+         * 
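A brief sketch of the offset arithmetic (values illustrative): per the comments above, the first span's offset is an absolute starting index and may be negative, while each later span's offset is the gap from the end of the previous span.

    // Starts at bucket index -1, covers indexes -1 and 0.
    Types.BucketSpan first =
            Types.BucketSpan.newBuilder().setOffset(-1).setLength(2).build();
    // Gap of 2 after index 0 (skipping indexes 1 and 2),
    // so this span covers indexes 3 and 4.
    Types.BucketSpan second =
            Types.BucketSpan.newBuilder().setOffset(2).setLength(2).build();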
+ * + * uint32 length = 2; + * + * @return The length. + */ + int getLength(); + } + + /** + * + * + *
+     * A BucketSpan defines a number of consecutive buckets with their
+     * offset. Logically, it would be more straightforward to include the
+     * bucket counts in the Span. However, the protobuf representation is
+     * more compact in the way the data is structured here (with all the
+     * buckets in a single array separate from the Spans).
+     * </pre>
+     *
Protobuf type {@code prometheus.BucketSpan} + */ + public static final class BucketSpan extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.BucketSpan) + BucketSpanOrBuilder { + private static final long serialVersionUID = 0L; + + // Use BucketSpan.newBuilder() to construct. + private BucketSpan(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BucketSpan() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new BucketSpan(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_BucketSpan_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_BucketSpan_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.BucketSpan.class, Types.BucketSpan.Builder.class); + } + + public static final int OFFSET_FIELD_NUMBER = 1; + private int offset_ = 0; + + /** + * + * + *
+         * <pre>
+         * Gap to previous span, or starting point for 1st span (which can be negative).
+         * 
+ * + * sint32 offset = 1; + * + * @return The offset. + */ + @Override + public int getOffset() { + return offset_; + } + + public static final int LENGTH_FIELD_NUMBER = 2; + private int length_ = 0; + + /** + * + * + *
+         * Length of consecutive buckets.
+         * 
+ * + * uint32 length = 2; + * + * @return The length. + */ + @Override + public int getLength() { + return length_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (offset_ != 0) { + output.writeSInt32(1, offset_); + } + if (length_ != 0) { + output.writeUInt32(2, length_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + if (offset_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeSInt32Size(1, offset_); + } + if (length_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeUInt32Size(2, length_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.BucketSpan)) { + return super.equals(obj); + } + Types.BucketSpan other = (Types.BucketSpan) obj; + + if (getOffset() != other.getOffset()) { + return false; + } + if (getLength() != other.getLength()) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset(); + hash = (37 * hash) + LENGTH_FIELD_NUMBER; + hash = (53 * hash) + getLength(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.BucketSpan parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.BucketSpan parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.BucketSpan parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.BucketSpan parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.BucketSpan parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.BucketSpan parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.BucketSpan parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.BucketSpan parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.BucketSpan parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.BucketSpan parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.BucketSpan parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.BucketSpan parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.BucketSpan prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * A BucketSpan defines a number of consecutive buckets with their
+         * offset. Logically, it would be more straightforward to include the
+         * bucket counts in the Span. However, the protobuf representation is
+         * more compact in the way the data is structured here (with all the
+         * buckets in a single array separate from the Spans).
+         * </pre>
+         *
Protobuf type {@code prometheus.BucketSpan} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.BucketSpan) + Types.BucketSpanOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_BucketSpan_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_BucketSpan_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.BucketSpan.class, Types.BucketSpan.Builder.class); + } + + // Construct using Types.BucketSpan.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + offset_ = 0; + length_ = 0; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_BucketSpan_descriptor; + } + + @Override + public Types.BucketSpan getDefaultInstanceForType() { + return Types.BucketSpan.getDefaultInstance(); + } + + @Override + public Types.BucketSpan build() { + Types.BucketSpan result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.BucketSpan buildPartial() { + Types.BucketSpan result = new Types.BucketSpan(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(Types.BucketSpan result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.offset_ = offset_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.length_ = length_; + } + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Types.BucketSpan) { + return mergeFrom((Types.BucketSpan) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.BucketSpan other) { + if (other == Types.BucketSpan.getDefaultInstance()) { + return this; + } + if (other.getOffset() != 0) { + setOffset(other.getOffset()); + } + if (other.getLength() != 0) { + setLength(other.getLength()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + offset_ = input.readSInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + length_ = input.readUInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int offset_; + + /** + * + * + *
+             * <pre>
+             * Gap to previous span, or starting point for 1st span (which can be negative).
+             * 
+ * + * sint32 offset = 1; + * + * @return The offset. + */ + @Override + public int getOffset() { + return offset_; + } + + /** + * + * + *
+             * Gap to previous span, or starting point for 1st span (which can be negative).
+             * 
+ * + * sint32 offset = 1; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(int value) { + + offset_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+             * Gap to previous span, or starting point for 1st span (which can be negative).
+             * 
+ * + * sint32 offset = 1; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + bitField0_ = (bitField0_ & ~0x00000001); + offset_ = 0; + onChanged(); + return this; + } + + private int length_; + + /** + * + * + *
+             * Length of consecutive buckets.
+             * 
+ * + * uint32 length = 2; + * + * @return The length. + */ + @Override + public int getLength() { + return length_; + } + + /** + * + * + *
+             * Length of consecutive buckets.
+             * 
+ * + * uint32 length = 2; + * + * @param value The length to set. + * @return This builder for chaining. + */ + public Builder setLength(int value) { + + length_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+             * Length of consecutive buckets.
+             * 
+ * + * uint32 length = 2; + * + * @return This builder for chaining. + */ + public Builder clearLength() { + bitField0_ = (bitField0_ & ~0x00000002); + length_ = 0; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.BucketSpan) + } + + // @@protoc_insertion_point(class_scope:prometheus.BucketSpan) + private static final Types.BucketSpan DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.BucketSpan(); + } + + public static Types.BucketSpan getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public BucketSpan parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Types.BucketSpan getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TimeSeriesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.TimeSeries) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
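A minimal end-to-end sketch of a valid series (metric and label values invented; the Label and Sample setters are assumed to follow the same generated pattern as the other types in this file):

    Types.TimeSeries series =
            Types.TimeSeries.newBuilder()
                    .addLabels(Types.Label.newBuilder()
                            .setName("__name__").setValue("http_requests_total"))
                    .addLabels(Types.Label.newBuilder()
                            .setName("job").setValue("seatunnel"))
                    .addSamples(Types.Sample.newBuilder()
                            .setValue(42.0)
                            .setTimestamp(System.currentTimeMillis()))
                    .build();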
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + java.util.List getLabelsList(); + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + Types.Label getLabels(int index); + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + int getLabelsCount(); + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + java.util.List getLabelsOrBuilderList(); + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + Types.LabelOrBuilder getLabelsOrBuilder(int index); + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + java.util.List getSamplesList(); + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + Types.Sample getSamples(int index); + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + int getSamplesCount(); + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + java.util.List getSamplesOrBuilderList(); + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + Types.SampleOrBuilder getSamplesOrBuilder(int index); + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + java.util.List getExemplarsList(); + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + Types.Exemplar getExemplars(int index); + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + int getExemplarsCount(); + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + java.util.List getExemplarsOrBuilderList(); + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + Types.ExemplarOrBuilder getExemplarsOrBuilder(int index); + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getHistogramsList(); + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + Types.Histogram getHistograms(int index); + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + int getHistogramsCount(); + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + java.util.List getHistogramsOrBuilderList(); + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + Types.HistogramOrBuilder getHistogramsOrBuilder(int index); + } + + /** + * + * + *
+     * TimeSeries represents samples and labels for a single time series.
+     * </pre>
+     *
Protobuf type {@code prometheus.TimeSeries} + */ + public static final class TimeSeries extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.TimeSeries) + TimeSeriesOrBuilder { + private static final long serialVersionUID = 0L; + + // Use TimeSeries.newBuilder() to construct. + private TimeSeries(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TimeSeries() { + labels_ = java.util.Collections.emptyList(); + samples_ = java.util.Collections.emptyList(); + exemplars_ = java.util.Collections.emptyList(); + histograms_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new TimeSeries(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_TimeSeries_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_TimeSeries_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.TimeSeries.class, Types.TimeSeries.Builder.class); + } + + public static final int LABELS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List labels_; + + /** + * + * + *
+         * <pre>
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public java.util.List getLabelsList() { + return labels_; + } + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public java.util.List getLabelsOrBuilderList() { + return labels_; + } + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public int getLabelsCount() { + return labels_.size(); + } + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public Types.Label getLabels(int index) { + return labels_.get(index); + } + + /** + * + * + *
+         * For a timeseries to be valid, and for the samples and exemplars
+         * to be ingested by the remote system properly, the labels field is required.
+         * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + @Override + public Types.LabelOrBuilder getLabelsOrBuilder(int index) { + return labels_.get(index); + } + + public static final int SAMPLES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List samples_; + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + @Override + public java.util.List getSamplesList() { + return samples_; + } + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + @Override + public java.util.List getSamplesOrBuilderList() { + return samples_; + } + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + @Override + public int getSamplesCount() { + return samples_.size(); + } + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + @Override + public Types.Sample getSamples(int index) { + return samples_.get(index); + } + + /** repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; */ + @Override + public Types.SampleOrBuilder getSamplesOrBuilder(int index) { + return samples_.get(index); + } + + public static final int EXEMPLARS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List exemplars_; + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + @Override + public java.util.List getExemplarsList() { + return exemplars_; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + @Override + public java.util.List getExemplarsOrBuilderList() { + return exemplars_; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + @Override + public int getExemplarsCount() { + return exemplars_.size(); + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + @Override + public Types.Exemplar getExemplars(int index) { + return exemplars_.get(index); + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + */ + @Override + public Types.ExemplarOrBuilder getExemplarsOrBuilder(int index) { + return exemplars_.get(index); + } + + public static final int HISTOGRAMS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List histograms_; + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getHistogramsList() { + return histograms_; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + @Override + public java.util.List getHistogramsOrBuilderList() { + return histograms_; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + @Override + public int getHistogramsCount() { + return histograms_.size(); + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.Histogram getHistograms(int index) { + return histograms_.get(index); + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + @Override + public Types.HistogramOrBuilder getHistogramsOrBuilder(int index) { + return histograms_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized 
== 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < labels_.size(); i++) { + output.writeMessage(1, labels_.get(i)); + } + for (int i = 0; i < samples_.size(); i++) { + output.writeMessage(2, samples_.get(i)); + } + for (int i = 0; i < exemplars_.size(); i++) { + output.writeMessage(3, exemplars_.get(i)); + } + for (int i = 0; i < histograms_.size(); i++) { + output.writeMessage(4, histograms_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + for (int i = 0; i < labels_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, labels_.get(i)); + } + for (int i = 0; i < samples_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, samples_.get(i)); + } + for (int i = 0; i < exemplars_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, exemplars_.get(i)); + } + for (int i = 0; i < histograms_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, histograms_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.TimeSeries)) { + return super.equals(obj); + } + Types.TimeSeries other = (Types.TimeSeries) obj; + + if (!getLabelsList().equals(other.getLabelsList())) { + return false; + } + if (!getSamplesList().equals(other.getSamplesList())) { + return false; + } + if (!getExemplarsList().equals(other.getExemplarsList())) { + return false; + } + if (!getHistogramsList().equals(other.getHistogramsList())) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getLabelsCount() > 0) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + getLabelsList().hashCode(); + } + if (getSamplesCount() > 0) { + hash = (37 * hash) + SAMPLES_FIELD_NUMBER; + hash = (53 * hash) + getSamplesList().hashCode(); + } + if (getExemplarsCount() > 0) { + hash = (37 * hash) + EXEMPLARS_FIELD_NUMBER; + hash = (53 * hash) + getExemplarsList().hashCode(); + } + if (getHistogramsCount() > 0) { + hash = (37 * hash) + HISTOGRAMS_FIELD_NUMBER; + hash = (53 * hash) + getHistogramsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.TimeSeries parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.TimeSeries parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.TimeSeries parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.TimeSeries parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.TimeSeries parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.TimeSeries parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.TimeSeries parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.TimeSeries parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.TimeSeries parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.TimeSeries parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.TimeSeries parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.TimeSeries parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.TimeSeries prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+         * TimeSeries represents samples and labels for a single time series.
+         * </pre>
+         *
Protobuf type {@code prometheus.TimeSeries} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.TimeSeries) + Types.TimeSeriesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_TimeSeries_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_TimeSeries_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.TimeSeries.class, Types.TimeSeries.Builder.class); + } + + // Construct using Types.TimeSeries.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (labelsBuilder_ == null) { + labels_ = java.util.Collections.emptyList(); + } else { + labels_ = null; + labelsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (samplesBuilder_ == null) { + samples_ = java.util.Collections.emptyList(); + } else { + samples_ = null; + samplesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (exemplarsBuilder_ == null) { + exemplars_ = java.util.Collections.emptyList(); + } else { + exemplars_ = null; + exemplarsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (histogramsBuilder_ == null) { + histograms_ = java.util.Collections.emptyList(); + } else { + histograms_ = null; + histogramsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_TimeSeries_descriptor; + } + + @Override + public Types.TimeSeries getDefaultInstanceForType() { + return Types.TimeSeries.getDefaultInstance(); + } + + @Override + public Types.TimeSeries build() { + Types.TimeSeries result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.TimeSeries buildPartial() { + Types.TimeSeries result = new Types.TimeSeries(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(Types.TimeSeries result) { + if (labelsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + labels_ = java.util.Collections.unmodifiableList(labels_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.labels_ = labels_; + } else { + result.labels_ = labelsBuilder_.build(); + } + if (samplesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + samples_ = java.util.Collections.unmodifiableList(samples_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.samples_ = samples_; + } else { + result.samples_ = samplesBuilder_.build(); + } + if (exemplarsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + exemplars_ = java.util.Collections.unmodifiableList(exemplars_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.exemplars_ = exemplars_; + } else { + result.exemplars_ = exemplarsBuilder_.build(); + } + if (histogramsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + histograms_ = java.util.Collections.unmodifiableList(histograms_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.histograms_ = histograms_; + } else { + result.histograms_ = 
histogramsBuilder_.build(); + } + } + + private void buildPartial0(Types.TimeSeries result) { + int from_bitField0_ = bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Types.TimeSeries) { + return mergeFrom((Types.TimeSeries) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.TimeSeries other) { + if (other == Types.TimeSeries.getDefaultInstance()) { + return this; + } + if (labelsBuilder_ == null) { + if (!other.labels_.isEmpty()) { + if (labels_.isEmpty()) { + labels_ = other.labels_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureLabelsIsMutable(); + labels_.addAll(other.labels_); + } + onChanged(); + } + } else { + if (!other.labels_.isEmpty()) { + if (labelsBuilder_.isEmpty()) { + labelsBuilder_.dispose(); + labelsBuilder_ = null; + labels_ = other.labels_; + bitField0_ = (bitField0_ & ~0x00000001); + labelsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getLabelsFieldBuilder() + : null; + } else { + labelsBuilder_.addAllMessages(other.labels_); + } + } + } + if (samplesBuilder_ == null) { + if (!other.samples_.isEmpty()) { + if (samples_.isEmpty()) { + samples_ = other.samples_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureSamplesIsMutable(); + samples_.addAll(other.samples_); + } + onChanged(); + } + } else { + if (!other.samples_.isEmpty()) { + if (samplesBuilder_.isEmpty()) { + samplesBuilder_.dispose(); + samplesBuilder_ = null; + samples_ = other.samples_; + bitField0_ = (bitField0_ & ~0x00000002); + samplesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getSamplesFieldBuilder() + : null; + } else { + samplesBuilder_.addAllMessages(other.samples_); + } + } + } + if (exemplarsBuilder_ == null) { + if (!other.exemplars_.isEmpty()) { + if (exemplars_.isEmpty()) { + exemplars_ = other.exemplars_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureExemplarsIsMutable(); + exemplars_.addAll(other.exemplars_); + } + onChanged(); + } + } else { + if (!other.exemplars_.isEmpty()) { + if (exemplarsBuilder_.isEmpty()) { + exemplarsBuilder_.dispose(); + exemplarsBuilder_ = null; + exemplars_ = other.exemplars_; + bitField0_ = (bitField0_ & ~0x00000004); + exemplarsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getExemplarsFieldBuilder() + : null; + } else { + exemplarsBuilder_.addAllMessages(other.exemplars_); + } + } + } + if (histogramsBuilder_ == null) { + if (!other.histograms_.isEmpty()) { + if (histograms_.isEmpty()) { + histograms_ = other.histograms_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureHistogramsIsMutable(); + histograms_.addAll(other.histograms_); + } + onChanged(); + } + } else { + if (!other.histograms_.isEmpty()) { + if (histogramsBuilder_.isEmpty()) { + histogramsBuilder_.dispose(); + histogramsBuilder_ = null; + histograms_ = other.histograms_; + bitField0_ = (bitField0_ & ~0x00000008); + histogramsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getHistogramsFieldBuilder() + : null; + } else { + histogramsBuilder_.addAllMessages(other.histograms_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + Types.Label m = + input.readMessage( + Types.Label.parser(), extensionRegistry); + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.add(m); + } else { + labelsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + Types.Sample m = + input.readMessage( + Types.Sample.parser(), extensionRegistry); + if (samplesBuilder_ == null) { + ensureSamplesIsMutable(); + samples_.add(m); + } else { + samplesBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + Types.Exemplar m = + input.readMessage( + Types.Exemplar.parser(), extensionRegistry); + if (exemplarsBuilder_ == null) { + ensureExemplarsIsMutable(); + exemplars_.add(m); + } else { + exemplarsBuilder_.addMessage(m); + } + break; + } // case 26 + case 34: + { + Types.Histogram m = + input.readMessage( + Types.Histogram.parser(), extensionRegistry); + if (histogramsBuilder_ == null) { + ensureHistogramsIsMutable(); + histograms_.add(m); + } else { + histogramsBuilder_.addMessage(m); + } + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List labels_ = java.util.Collections.emptyList(); + + private void ensureLabelsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + labels_ = new java.util.ArrayList(labels_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Label, Types.Label.Builder, Types.LabelOrBuilder> + labelsBuilder_; + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * 
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public java.util.List getLabelsList() { + if (labelsBuilder_ == null) { + return java.util.Collections.unmodifiableList(labels_); + } else { + return labelsBuilder_.getMessageList(); + } + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public int getLabelsCount() { + if (labelsBuilder_ == null) { + return labels_.size(); + } else { + return labelsBuilder_.getCount(); + } + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label getLabels(int index) { + if (labelsBuilder_ == null) { + return labels_.get(index); + } else { + return labelsBuilder_.getMessage(index); + } + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder setLabels(int index, Types.Label value) { + if (labelsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelsIsMutable(); + labels_.set(index, value); + onChanged(); + } else { + labelsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder setLabels(int index, Types.Label.Builder builderForValue) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.set(index, builderForValue.build()); + onChanged(); + } else { + labelsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(Types.Label value) { + if (labelsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelsIsMutable(); + labels_.add(value); + onChanged(); + } else { + labelsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(int index, Types.Label value) { + if (labelsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelsIsMutable(); + labels_.add(index, value); + onChanged(); + } else { + labelsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(Types.Label.Builder builderForValue) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.add(builderForValue.build()); + onChanged(); + } else { + labelsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addLabels(int index, Types.Label.Builder builderForValue) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.add(index, builderForValue.build()); + onChanged(); + } else { + labelsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder addAllLabels(Iterable values) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, labels_); + onChanged(); + } else { + labelsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder clearLabels() { + if (labelsBuilder_ == null) { + labels_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + labelsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Builder removeLabels(int index) { + if (labelsBuilder_ == null) { + ensureLabelsIsMutable(); + labels_.remove(index); + onChanged(); + } else { + labelsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label.Builder getLabelsBuilder(int index) { + return getLabelsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.LabelOrBuilder getLabelsOrBuilder(int index) { + if (labelsBuilder_ == null) { + return labels_.get(index); + } else { + return labelsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public java.util.List getLabelsOrBuilderList() { + if (labelsBuilder_ != null) { + return labelsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(labels_); + } + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label.Builder addLabelsBuilder() { + return getLabelsFieldBuilder().addBuilder(Types.Label.getDefaultInstance()); + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public Types.Label.Builder addLabelsBuilder(int index) { + return getLabelsFieldBuilder().addBuilder(index, Types.Label.getDefaultInstance()); + } + + /** + * + * + *
+             * <pre>
+             * For a timeseries to be valid, and for the samples and exemplars
+             * to be ingested by the remote system properly, the labels field is required.
+             * </pre>
+ * + * repeated .prometheus.Label labels = 1 [(.gogoproto.nullable) = false]; + */ + public java.util.List getLabelsBuilderList() { + return getLabelsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Label, Types.Label.Builder, Types.LabelOrBuilder> + getLabelsFieldBuilder() { + if (labelsBuilder_ == null) { + labelsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.Label, Types.Label.Builder, Types.LabelOrBuilder>( + labels_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + labels_ = null; + } + return labelsBuilder_; + } + + private java.util.List samples_ = java.util.Collections.emptyList(); + + private void ensureSamplesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + samples_ = new java.util.ArrayList(samples_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Sample, Types.Sample.Builder, Types.SampleOrBuilder> + samplesBuilder_; + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public java.util.List getSamplesList() { + if (samplesBuilder_ == null) { + return java.util.Collections.unmodifiableList(samples_); + } else { + return samplesBuilder_.getMessageList(); + } + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public int getSamplesCount() { + if (samplesBuilder_ == null) { + return samples_.size(); + } else { + return samplesBuilder_.getCount(); + } + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Types.Sample getSamples(int index) { + if (samplesBuilder_ == null) { + return samples_.get(index); + } else { + return samplesBuilder_.getMessage(index); + } + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder setSamples(int index, Types.Sample value) { + if (samplesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSamplesIsMutable(); + samples_.set(index, value); + onChanged(); + } else { + samplesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder setSamples(int index, Types.Sample.Builder builderForValue) { + if (samplesBuilder_ == null) { + ensureSamplesIsMutable(); + samples_.set(index, builderForValue.build()); + onChanged(); + } else { + samplesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder addSamples(Types.Sample value) { + if (samplesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSamplesIsMutable(); + samples_.add(value); + onChanged(); + } else { + samplesBuilder_.addMessage(value); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder addSamples(int index, Types.Sample value) { + if (samplesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSamplesIsMutable(); + samples_.add(index, value); + onChanged(); + } else { + samplesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder addSamples(Types.Sample.Builder builderForValue) { + if (samplesBuilder_ == null) { + 
ensureSamplesIsMutable(); + samples_.add(builderForValue.build()); + onChanged(); + } else { + samplesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder addSamples(int index, Types.Sample.Builder builderForValue) { + if (samplesBuilder_ == null) { + ensureSamplesIsMutable(); + samples_.add(index, builderForValue.build()); + onChanged(); + } else { + samplesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder addAllSamples(Iterable values) { + if (samplesBuilder_ == null) { + ensureSamplesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, samples_); + onChanged(); + } else { + samplesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder clearSamples() { + if (samplesBuilder_ == null) { + samples_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + samplesBuilder_.clear(); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Builder removeSamples(int index) { + if (samplesBuilder_ == null) { + ensureSamplesIsMutable(); + samples_.remove(index); + onChanged(); + } else { + samplesBuilder_.remove(index); + } + return this; + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Types.Sample.Builder getSamplesBuilder(int index) { + return getSamplesFieldBuilder().getBuilder(index); + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Types.SampleOrBuilder getSamplesOrBuilder(int index) { + if (samplesBuilder_ == null) { + return samples_.get(index); + } else { + return samplesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public java.util.List getSamplesOrBuilderList() { + if (samplesBuilder_ != null) { + return samplesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(samples_); + } + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Types.Sample.Builder addSamplesBuilder() { + return getSamplesFieldBuilder().addBuilder(Types.Sample.getDefaultInstance()); + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public Types.Sample.Builder addSamplesBuilder(int index) { + return getSamplesFieldBuilder() + .addBuilder(index, Types.Sample.getDefaultInstance()); + } + + /** + * repeated .prometheus.Sample samples = 2 [(.gogoproto.nullable) = false]; + */ + public java.util.List getSamplesBuilderList() { + return getSamplesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Sample, Types.Sample.Builder, Types.SampleOrBuilder> + getSamplesFieldBuilder() { + if (samplesBuilder_ == null) { + samplesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.Sample, Types.Sample.Builder, Types.SampleOrBuilder>( + samples_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + samples_ = null; + } + return samplesBuilder_; + } + + private java.util.List exemplars_ = java.util.Collections.emptyList(); + 
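The labels and samples accessors above follow protoc's standard repeated-field pattern: a plain backing list is used until a nested builder is first requested, after which a `RepeatedFieldBuilderV3` takes ownership of the data and the matching `bitField0_` flag tracks mutability. A minimal sketch of how a caller assembles a series with these generated builders; the `Label` setters are the ones generated in this file, while `Types.Sample`'s `setValue`/`setTimestamp` setters are not shown in this diff and are assumed to match the upstream Prometheus remote-write proto:

```java
// Sketch only: assumes Types.Sample exposes setValue(double) and
// setTimestamp(long), per the upstream Prometheus remote-write proto.
Types.TimeSeries series =
        Types.TimeSeries.newBuilder()
                // Labels are required for the remote system to ingest the series.
                .addLabels(Types.Label.newBuilder()
                        .setName("__name__")
                        .setValue("http_requests_total"))
                .addLabels(Types.Label.newBuilder()
                        .setName("job")
                        .setValue("seatunnel"))
                .addSamples(Types.Sample.newBuilder()
                        .setValue(42.0)
                        .setTimestamp(System.currentTimeMillis()))
                .build();
```

Passing the nested builders directly (rather than calling `build()` on each) relies on the `addLabels(Types.Label.Builder)` and `addSamples(Types.Sample.Builder)` overloads generated above, which build the message at insertion time.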
+ private void ensureExemplarsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + exemplars_ = new java.util.ArrayList(exemplars_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Exemplar, Types.Exemplar.Builder, Types.ExemplarOrBuilder> + exemplarsBuilder_; + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getExemplarsList() { + if (exemplarsBuilder_ == null) { + return java.util.Collections.unmodifiableList(exemplars_); + } else { + return exemplarsBuilder_.getMessageList(); + } + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public int getExemplarsCount() { + if (exemplarsBuilder_ == null) { + return exemplars_.size(); + } else { + return exemplarsBuilder_.getCount(); + } + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.Exemplar getExemplars(int index) { + if (exemplarsBuilder_ == null) { + return exemplars_.get(index); + } else { + return exemplarsBuilder_.getMessage(index); + } + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder setExemplars(int index, Types.Exemplar value) { + if (exemplarsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureExemplarsIsMutable(); + exemplars_.set(index, value); + onChanged(); + } else { + exemplarsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder setExemplars(int index, Types.Exemplar.Builder builderForValue) { + if (exemplarsBuilder_ == null) { + ensureExemplarsIsMutable(); + exemplars_.set(index, builderForValue.build()); + onChanged(); + } else { + exemplarsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addExemplars(Types.Exemplar value) { + if (exemplarsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureExemplarsIsMutable(); + exemplars_.add(value); + onChanged(); + } else { + exemplarsBuilder_.addMessage(value); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addExemplars(int index, Types.Exemplar value) { + if (exemplarsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureExemplarsIsMutable(); + exemplars_.add(index, value); + onChanged(); + } else { + exemplarsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addExemplars(Types.Exemplar.Builder builderForValue) { + if (exemplarsBuilder_ == null) { + ensureExemplarsIsMutable(); + exemplars_.add(builderForValue.build()); + onChanged(); + } else { + exemplarsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addExemplars(int index, Types.Exemplar.Builder builderForValue) { + if (exemplarsBuilder_ == null) { + ensureExemplarsIsMutable(); + exemplars_.add(index, builderForValue.build()); + onChanged(); + } else { + exemplarsBuilder_.addMessage(index, 
builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder addAllExemplars(Iterable values) { + if (exemplarsBuilder_ == null) { + ensureExemplarsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, exemplars_); + onChanged(); + } else { + exemplarsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder clearExemplars() { + if (exemplarsBuilder_ == null) { + exemplars_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + exemplarsBuilder_.clear(); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Builder removeExemplars(int index) { + if (exemplarsBuilder_ == null) { + ensureExemplarsIsMutable(); + exemplars_.remove(index); + onChanged(); + } else { + exemplarsBuilder_.remove(index); + } + return this; + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.Exemplar.Builder getExemplarsBuilder(int index) { + return getExemplarsFieldBuilder().getBuilder(index); + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.ExemplarOrBuilder getExemplarsOrBuilder(int index) { + if (exemplarsBuilder_ == null) { + return exemplars_.get(index); + } else { + return exemplarsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getExemplarsOrBuilderList() { + if (exemplarsBuilder_ != null) { + return exemplarsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(exemplars_); + } + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.Exemplar.Builder addExemplarsBuilder() { + return getExemplarsFieldBuilder().addBuilder(Types.Exemplar.getDefaultInstance()); + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public Types.Exemplar.Builder addExemplarsBuilder(int index) { + return getExemplarsFieldBuilder() + .addBuilder(index, Types.Exemplar.getDefaultInstance()); + } + + /** + * repeated .prometheus.Exemplar exemplars = 3 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getExemplarsBuilderList() { + return getExemplarsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Exemplar, Types.Exemplar.Builder, Types.ExemplarOrBuilder> + getExemplarsFieldBuilder() { + if (exemplarsBuilder_ == null) { + exemplarsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.Exemplar, + Types.Exemplar.Builder, + Types.ExemplarOrBuilder>( + exemplars_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + exemplars_ = null; + } + return exemplarsBuilder_; + } + + private java.util.List histograms_ = java.util.Collections.emptyList(); + + private void ensureHistogramsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + histograms_ = new java.util.ArrayList(histograms_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Histogram, Types.Histogram.Builder, Types.HistogramOrBuilder> + histogramsBuilder_; + + /** + * 
repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getHistogramsList() { + if (histogramsBuilder_ == null) { + return java.util.Collections.unmodifiableList(histograms_); + } else { + return histogramsBuilder_.getMessageList(); + } + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public int getHistogramsCount() { + if (histogramsBuilder_ == null) { + return histograms_.size(); + } else { + return histogramsBuilder_.getCount(); + } + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Types.Histogram getHistograms(int index) { + if (histogramsBuilder_ == null) { + return histograms_.get(index); + } else { + return histogramsBuilder_.getMessage(index); + } + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder setHistograms(int index, Types.Histogram value) { + if (histogramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureHistogramsIsMutable(); + histograms_.set(index, value); + onChanged(); + } else { + histogramsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder setHistograms(int index, Types.Histogram.Builder builderForValue) { + if (histogramsBuilder_ == null) { + ensureHistogramsIsMutable(); + histograms_.set(index, builderForValue.build()); + onChanged(); + } else { + histogramsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder addHistograms(Types.Histogram value) { + if (histogramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureHistogramsIsMutable(); + histograms_.add(value); + onChanged(); + } else { + histogramsBuilder_.addMessage(value); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder addHistograms(int index, Types.Histogram value) { + if (histogramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureHistogramsIsMutable(); + histograms_.add(index, value); + onChanged(); + } else { + histogramsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder addHistograms(Types.Histogram.Builder builderForValue) { + if (histogramsBuilder_ == null) { + ensureHistogramsIsMutable(); + histograms_.add(builderForValue.build()); + onChanged(); + } else { + histogramsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder addHistograms(int index, Types.Histogram.Builder builderForValue) { + if (histogramsBuilder_ == null) { + ensureHistogramsIsMutable(); + histograms_.add(index, builderForValue.build()); + onChanged(); + } else { + histogramsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder addAllHistograms(Iterable values) { + if (histogramsBuilder_ == null) { + ensureHistogramsIsMutable(); + 
com.google.protobuf.AbstractMessageLite.Builder.addAll(values, histograms_); + onChanged(); + } else { + histogramsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder clearHistograms() { + if (histogramsBuilder_ == null) { + histograms_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + histogramsBuilder_.clear(); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Builder removeHistograms(int index) { + if (histogramsBuilder_ == null) { + ensureHistogramsIsMutable(); + histograms_.remove(index); + onChanged(); + } else { + histogramsBuilder_.remove(index); + } + return this; + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Types.Histogram.Builder getHistogramsBuilder(int index) { + return getHistogramsFieldBuilder().getBuilder(index); + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Types.HistogramOrBuilder getHistogramsOrBuilder(int index) { + if (histogramsBuilder_ == null) { + return histograms_.get(index); + } else { + return histogramsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getHistogramsOrBuilderList() { + if (histogramsBuilder_ != null) { + return histogramsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(histograms_); + } + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Types.Histogram.Builder addHistogramsBuilder() { + return getHistogramsFieldBuilder().addBuilder(Types.Histogram.getDefaultInstance()); + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public Types.Histogram.Builder addHistogramsBuilder(int index) { + return getHistogramsFieldBuilder() + .addBuilder(index, Types.Histogram.getDefaultInstance()); + } + + /** + * repeated .prometheus.Histogram histograms = 4 [(.gogoproto.nullable) = false]; + * + */ + public java.util.List getHistogramsBuilderList() { + return getHistogramsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + Types.Histogram, Types.Histogram.Builder, Types.HistogramOrBuilder> + getHistogramsFieldBuilder() { + if (histogramsBuilder_ == null) { + histogramsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + Types.Histogram, + Types.Histogram.Builder, + Types.HistogramOrBuilder>( + histograms_, + ((bitField0_ & 0x00000008) != 0), + getParentForChildren(), + isClean()); + histograms_ = null; + } + return histogramsBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.TimeSeries) + } + + // @@protoc_insertion_point(class_scope:prometheus.TimeSeries) + private static final Types.TimeSeries DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.TimeSeries(); + } + + public static 
Types.TimeSeries getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public TimeSeries parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Types.TimeSeries getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface LabelOrBuilder + extends + // @@protoc_insertion_point(interface_extends:prometheus.Label) + com.google.protobuf.MessageOrBuilder { + + /** + * string name = 1; + * + * @return The name. + */ + String getName(); + + /** + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * string value = 2; + * + * @return The value. + */ + String getValue(); + + /** + * string value = 2; + * + * @return The bytes for value. + */ + com.google.protobuf.ByteString getValueBytes(); + } + + /** Protobuf type {@code prometheus.Label} */ + public static final class Label extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:prometheus.Label) + LabelOrBuilder { + private static final long serialVersionUID = 0L; + + // Use Label.newBuilder() to construct. + private Label(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Label() { + name_ = ""; + value_ = ""; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new Label(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Label_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Label_fieldAccessorTable + .ensureFieldAccessorsInitialized(Types.Label.class, Types.Label.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile Object name_ = ""; + + /** + * string name = 1; + * + * @return The name. + */ + @Override + public String getName() { + Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * string name = 1; + * + * @return The bytes for name. 
+ */ + @Override + public com.google.protobuf.ByteString getNameBytes() { + Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile Object value_ = ""; + + /** + * string value = 2; + * + * @return The value. + */ + @Override + public String getValue() { + Object ref = value_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + value_ = s; + return s; + } + } + + /** + * string value = 2; + * + * @return The bytes for value. + */ + @Override + public com.google.protobuf.ByteString getValueBytes() { + Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) { + return true; + } + if (isInitialized == 0) { + return false; + } + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(value_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, value_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) { + return size; + } + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(value_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Types.Label)) { + return super.equals(obj); + } + Types.Label other = (Types.Label) obj; + + if (!getName().equals(other.getName())) { + return false; + } + if (!getValue().equals(other.getValue())) { + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) { + return false; + } + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Types.Label parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Label parseFrom( + java.nio.ByteBuffer data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Label parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Label parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Label parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Types.Label parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Types.Label parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Label parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Label parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input); + } + + public static Types.Label parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Types.Label parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Types.Label parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Types.Label prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code prometheus.Label} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:prometheus.Label) + Types.LabelOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Types.internal_static_prometheus_Label_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return Types.internal_static_prometheus_Label_fieldAccessorTable + .ensureFieldAccessorsInitialized( + Types.Label.class, Types.Label.Builder.class); + } + + // Construct using Types.Label.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + value_ = ""; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Types.internal_static_prometheus_Label_descriptor; + } + + @Override + public Types.Label getDefaultInstanceForType() { + return Types.Label.getDefaultInstance(); + } + + @Override + public Types.Label build() { + Types.Label result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public Types.Label buildPartial() { + Types.Label result = new Types.Label(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(Types.Label result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.value_ = value_; + } + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Types.Label) { + return mergeFrom((Types.Label) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Types.Label other) { + if (other == Types.Label.getDefaultInstance()) { + return this; + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getValue().isEmpty()) { + value_ = other.value_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + 
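The `mergeFrom(Types.Label)` overload just above copies only non-empty fields, matching proto3 semantics where an unset string is indistinguishable from `""`. A short round-trip sketch using only calls that appear in this diff (`newBuilder`, `toBuilder`, `parseFrom`, `getDefaultInstance`) plus the standard `toByteArray()` inherited from `GeneratedMessageV3`:

```java
static void labelRoundTrip() throws com.google.protobuf.InvalidProtocolBufferException {
    Types.Label original = Types.Label.newBuilder()
            .setName("instance")
            .setValue("localhost:9090")
            .build();

    // writeTo() skips empty strings, and parseFrom() restores an equal message.
    Types.Label parsed = Types.Label.parseFrom(original.toByteArray());
    assert parsed.equals(original);

    // Merging the default instance is a no-op: mergeFrom(Types.Label)
    // returns early on the default instance and copies only non-empty fields.
    Types.Label merged = original.toBuilder()
            .mergeFrom(Types.Label.getDefaultInstance())
            .build();
    assert merged.equals(original);
}
```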
@Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + value_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private Object name_ = ""; + + /** + * string name = 1; + * + * @return The name. + */ + public String getName() { + Object ref = name_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private Object value_ = ""; + + /** + * string value = 2; + * + * @return The value. + */ + public String getValue() { + Object ref = value_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + value_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * string value = 2; + * + * @return The bytes for value. + */ + public com.google.protobuf.ByteString getValueBytes() { + Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * string value = 2; + * + * @param value The value to set. + * @return This builder for chaining. 
+ */ + public Builder setValue(String value) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * string value = 2; + * + * @return This builder for chaining. + */ + public Builder clearValue() { + value_ = getDefaultInstance().getValue(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * string value = 2; + * + * @param value The bytes for value to set. + * @return This builder for chaining. + */ + public Builder setValueBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + value_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:prometheus.Label) + } + + // @@protoc_insertion_point(class_scope:prometheus.Label) + private static final Types.Label DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Types.Label(); + } + + public static Types.Label getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser