diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..8e46b02 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +# Exclude development data directories +devel/data/ +devel/data/postgres +devel/data/risingwave + +# Exclude other development artifacts +.git +.gitignore +*.md +README.md +docker-compose.yml + +# Exclude any other temporary or cache files +.cache +*.log +*.tmp \ No newline at end of file diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index fe608ec..b1a30fd 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -6,6 +6,7 @@ on: - "v*" branches: - "*" + workflow_dispatch: env: REGISTRY: ghcr.io diff --git a/.gitignore b/.gitignore index 43f32f1..eeabf12 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,6 @@ devel/data* build/ *.spkg /substreams-sink-sql +/cursor.txt +/replay.log +/spl_all_schema_hash.txt diff --git a/FROM_PROTO_ANALYSIS.md b/FROM_PROTO_ANALYSIS.md new file mode 100644 index 0000000..9cb37b6 --- /dev/null +++ b/FROM_PROTO_ANALYSIS.md @@ -0,0 +1,563 @@ +# Substreams SQL Sink: From-Proto Mode Analysis + +## Overview + +The `from-proto` mode in substreams-sink-sql enables dynamic SQL schema generation from protobuf message definitions. This mode analyzes your substream's output protobuf messages and automatically creates corresponding SQL tables, columns, and constraints based on protobuf field types and custom schema annotations. + +## Table of Contents + +- [Execution Flow](#execution-flow) +- [Schema Annotations Reference](#schema-annotations-reference) +- [Schema Processing Rules](#schema-processing-rules) +- [Database Dialect Support](#database-dialect-support) +- [Usage Examples](#usage-examples) +- [Configuration Options](#configuration-options) +- [Best Practices](#best-practices) +- [Troubleshooting](#troubleshooting) + +## Execution Flow + +### 1. 
Command Entry Point +**File**: `cmd/substreams-sink-sql/from_proto.go:58` + +```bash +substreams-sink-sql from-proto [output-module] +``` + +### 2. Schema Detection Process + +1. **Manifest Parsing**: Reads substreams manifest and extracts protobuf definitions +2. **Dependency Analysis**: Checks if `sf/substreams/sink/sql/schema/v1/schema.proto` is imported +3. **Proto Option Detection**: Sets `useProtoOption=true` if schema annotations are available +4. **Constraint Configuration**: Enables constraints only when proto options are detected + +### 3. Schema Generation Pipeline + +``` +Protobuf Messages → Schema Registry → SQL Tables → Constraint Application +``` + +**Key Files**: +- `db_proto/sql/schema/schema.go` - Main schema orchestration +- `db_proto/sql/schema/table.go` - Table creation logic +- `db_proto/sql/schema/column.go` - Column processing +- `proto/utils.go` - Annotation extraction + +## Schema Annotations Reference + +### Message-Level Annotations + +**Extension**: `sf.substreams.sink.sql.schema.v1.table` +**Definition**: `proto/sf/substreams/sink/sql/schema/v1/schema.proto:16-22` + +```protobuf +message Table { + string name = 1; // Custom table name (required) + optional string child_of = 2; // Parent-child relationship +} +``` + +#### Supported Options + +| Option | Type | Required | Description | Example | +|--------|------|----------|-------------|---------| +| `name` | string | Yes | Custom table name | `"customers"` | +| `child_of` | string | No | Parent table relationship | `"orders on order_id"` | + +### Field-Level Annotations + +**Extension**: `sf.substreams.sink.sql.schema.v1.field` +**Definition**: `proto/sf/substreams/sink/sql/schema/v1/schema.proto:24-29` + +```protobuf +message Column { + optional string name = 1; // Custom column name + optional string foreign_key = 2; // Foreign key reference + bool unique = 3; // Unique constraint + bool primary_key = 4; // Primary key constraint +} +``` + +#### Supported Options + +| Option | Type | 
Required | Description | Example | +|--------|------|----------|-------------|---------| +| `name` | string | No | Custom column name | `"customer_id"` | +| `primary_key` | bool | No | Primary key constraint | `true` | +| `unique` | bool | No | Unique constraint | `true` | +| `foreign_key` | string | No | Foreign key reference | `"customers on customer_id"` | + +#### Foreign Key Format + +Foreign keys use the format: `"target_table on target_field"` + +Examples: +- `"customers on customer_id"` +- `"items on item_id"` +- `"orders on order_id"` + +#### Child Table Format + +Child relationships use the format: `"parent_table on parent_field"` + +Examples: +- `"orders on order_id"` +- `"customers on customer_id"` + +## Schema Processing Rules + +### Field Inclusion Rules +**Location**: `db_proto/sql/schema/table.go:76-115` + +#### Included Fields ✅ +- **Scalar fields**: `string`, `int32`, `int64`, `uint32`, `uint64`, `bool`, `double`, `float`, etc. +- **Timestamp fields**: `google.protobuf.Timestamp` +- **Non-repeated fields**: Single-value fields only +- **Non-oneof fields**: Regular message fields + +#### Excluded Fields ❌ +- **Repeated fields**: Automatically become child tables or are ignored +- **Oneof fields**: Used for polymorphic entity handling +- **Non-timestamp message fields**: Become separate tables +- **Extension fields**: Protobuf extensions are ignored + +### Table Creation Logic + +#### Automatic Table Names +If no `table` annotation is provided: +- Uses protobuf message name as table name +- Only created when `useProtoOption=false` + +#### Constraint Processing +**Primary Keys**: +- Only one primary key per table allowed +- Generates `ALTER TABLE ... ADD CONSTRAINT pk_table_name PRIMARY KEY (field)` + +**Unique Constraints**: +- Multiple unique constraints allowed per table +- Generates `ALTER TABLE ... 
ADD CONSTRAINT table_field_unique UNIQUE (field)` + +**Foreign Keys**: +- Supports both explicit and implicit foreign keys +- Explicit: Defined via `foreign_key` annotation +- Implicit: Created for child table relationships + +## Database Dialect Support + +### PostgreSQL +**Files**: `db_proto/sql/postgres/` + +**Features**: +- Full constraint support (PK, FK, UNIQUE) +- Separate ALTER TABLE statements for constraints +- Transaction-based constraint application + +**SQL Generation Example**: +```sql +CREATE TABLE customers (customer_id TEXT, name TEXT); +ALTER TABLE customers ADD CONSTRAINT customers_pk PRIMARY KEY (customer_id); +ALTER TABLE customers ADD CONSTRAINT customers_customer_id_unique UNIQUE (customer_id); +``` + +### RisingWave +**Files**: `db_proto/sql/risingwave/` + +**Features**: +- Same constraint pattern as PostgreSQL +- UPSERT-based data handling +- Autocommit mode for block undo operations + +**SQL Generation**: Identical to PostgreSQL + +### ClickHouse +**Files**: `db_proto/sql/click_house/` + +**Features**: +- Primary keys embedded in CREATE TABLE +- ReplacingMergeTree engine with versioning +- Partition by block timestamp +- Limited constraint support (PK only) + +**SQL Generation Example**: +```sql +CREATE TABLE customers ( + customer_id String, + name String, + block_timestamp DateTime, + version UInt64 +) ENGINE = ReplacingMergeTree(version) +PARTITION BY (toYYYYMM(block_timestamp)) +PRIMARY KEY (customer_id) +ORDER BY (customer_id); +``` + +## Usage Examples + +### Basic Proto Definition + +```protobuf +syntax = "proto3"; +import "sf/substreams/sink/sql/schema/v1/schema.proto"; + +message Output { + repeated Entity entities = 1; +} + +message Entity { + oneof entity { + Customer customer = 1; + Order order = 2; + OrderItem order_item = 3; + Item item = 4; + } +} +``` + +### Customer Table + +```protobuf +message Customer { + option (sf.substreams.sink.sql.schema.v1.table) = { + name: "customers" + }; + + string customer_id = 1 
[(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true + }]; + string name = 2; + string email = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + unique: true + }]; +} +``` + +**Generated SQL**: +```sql +CREATE TABLE customers ( + customer_id TEXT, + name TEXT, + email TEXT +); +ALTER TABLE customers ADD CONSTRAINT customers_pk PRIMARY KEY (customer_id); +ALTER TABLE customers ADD CONSTRAINT customers_email_unique UNIQUE (email); +``` + +### Order Table with Foreign Key + +```protobuf +message Order { + option (sf.substreams.sink.sql.schema.v1.table) = { + name: "orders" + }; + + string order_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true + }]; + string customer_ref_id = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + foreign_key: "customers on customer_id" + }]; + google.protobuf.Timestamp created_at = 3; + repeated OrderItem items = 4; // Becomes child table +} +``` + +**Generated SQL**: +```sql +CREATE TABLE orders ( + order_id TEXT, + customer_ref_id TEXT, + created_at TIMESTAMP +); +ALTER TABLE orders ADD CONSTRAINT orders_pk PRIMARY KEY (order_id); +ALTER TABLE orders ADD CONSTRAINT fk_customer_ref_id FOREIGN KEY (customer_ref_id) REFERENCES customers(customer_id); +``` + +### Child Table Relationship + +```protobuf +message OrderItem { + option (sf.substreams.sink.sql.schema.v1.table) = { + name: "order_items", + child_of: "orders on order_id" + }; + + string item_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + foreign_key: "items on item_id" + }]; + int64 quantity = 2; + double unit_price = 3; +} +``` + +**Generated SQL**: +```sql +CREATE TABLE order_items ( + item_id TEXT, + quantity BIGINT, + unit_price DOUBLE PRECISION, + orders_order_id TEXT -- Auto-generated parent reference +); +ALTER TABLE order_items ADD CONSTRAINT fk_item_id FOREIGN KEY (item_id) REFERENCES items(item_id); +ALTER TABLE order_items ADD CONSTRAINT fk_order_items FOREIGN KEY (orders_order_id) REFERENCES orders(order_id); +``` + +### 
Complete Example + +```protobuf +syntax = "proto3"; +import "google/protobuf/timestamp.proto"; +import "sf/substreams/sink/sql/schema/v1/schema.proto"; + +message Output { + repeated Entity entities = 1; +} + +message Entity { + oneof entity { + Customer customer = 1; + Order order = 2; + Item item = 3; + } +} + +message Customer { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "customers" }; + + string customer_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; + string name = 2; + string email = 3 [(sf.substreams.sink.sql.schema.v1.field) = { unique: true }]; +} + +message Order { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "orders" }; + + string order_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; + string customer_ref_id = 2 [(sf.substreams.sink.sql.schema.v1.field) = { foreign_key: "customers on customer_id" }]; + google.protobuf.Timestamp created_at = 3; + repeated OrderItem items = 4; +} + +message OrderItem { + option (sf.substreams.sink.sql.schema.v1.table) = { + name: "order_items", + child_of: "orders on order_id" + }; + + string item_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { foreign_key: "items on item_id" }]; + int64 quantity = 2; + double unit_price = 3; +} + +message Item { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "items" }; + + string item_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; + string name = 2; + double price = 3; +} +``` + +## Configuration Options + +### Command Line Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `--no-constraints` | `false` | Disable constraint generation for faster imports | +| `--block-batch-size` | `25` | Number of blocks to process at once | +| `--start-block` | `""` | Starting block number | +| `--stop-block` | `"0"` | Ending block number | + +### Environment Variables + +| Variable | Description | +|----------|-------------| +| 
`SUBSTREAMS_ENDPOINT_` | Network-specific endpoints | + +### DSN Examples + +```bash +# PostgreSQL +postgresql://user:password@localhost:5432/database?sslmode=disable + +# RisingWave +postgresql://root@localhost:4566/dev?sslmode=disable + +# ClickHouse +clickhouse://default@localhost:9000/default +``` + +## Best Practices + +### 1. Schema Design + +**✅ Do:** +- Use meaningful table and column names +- Define primary keys for all entities +- Use foreign keys to maintain referential integrity +- Leverage child table relationships for one-to-many data +- Use unique constraints for business keys + +**❌ Don't:** +- Create tables without primary keys +- Use overly complex nested message structures +- Ignore foreign key relationships +- Mix entity types in the same message + +### 2. Performance Optimization + +**Initial Import:** +```bash +# Disable constraints for faster initial import +substreams-sink-sql from-proto --no-constraints + +# Apply constraints after import +substreams-sink-sql from-proto-apply-constraints +``` + +**Batch Processing:** +```bash +# Increase batch size for faster processing +substreams-sink-sql from-proto --block-batch-size=100 +``` + +### 3. Migration Strategy + +**Schema Changes:** +1. The system automatically detects schema changes via hash comparison +2. Creates temporary schemas with new hash suffix +3. Validates data consistency between old and new schemas +4. Promotes new schema when validation passes + +**Version Control:** +- Keep protobuf definitions in version control +- Test schema changes in development environment +- Use consistent naming conventions + +## Troubleshooting + +### Common Issues + +#### 1. Schema Detection Failure +**Problem**: Schema annotations not recognized +**Solution**: Ensure `sf/substreams/sink/sql/schema/v1/schema.proto` is imported + +```protobuf +import "sf/substreams/sink/sql/schema/v1/schema.proto"; +``` + +#### 2. 
Multiple Primary Keys Error +**Problem**: `multiple primary keys are not supported in message` +**Solution**: Only one field per message can have `primary_key: true` + +#### 3. Foreign Key Format Error +**Problem**: `invalid foreign key format` +**Solution**: Use correct format: `"target_table on target_field"` + +#### 4. Missing Table Name +**Problem**: `table name is required for message` +**Solution**: Always specify table name in table annotation + +```protobuf +option (sf.substreams.sink.sql.schema.v1.table) = { name: "my_table" }; +``` + +#### 5. Constraint Application Failures +**Problem**: Constraints fail to apply +**Solutions**: +- Check that referenced tables exist +- Verify foreign key target columns exist +- Ensure data consistency before applying constraints +- Use `--no-constraints` for problematic imports + +### Debug Commands + +```bash +# Check schema detection +substreams-sink-sql from-proto --help + +# Validate manifest +substreams run -t +10 + +# Test database connection +psql +clickhouse-client --host +``` + +### Log Analysis + +Enable debug logging to trace schema processing: +```bash +export RUST_LOG=debug +substreams-sink-sql from-proto +``` + +Look for these log messages: +- `creating schema` - Schema initialization +- `creating table message descriptor` - Table creation +- `walking message descriptor` - Field processing +- `apply constraints` - Constraint application + +## Advanced Features + +### Custom Column Names + +```protobuf +message Customer { + string id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + name: "customer_identifier", + primary_key: true + }]; +} +``` + +### Complex Relationships + +```protobuf +// Many-to-many through junction table +message CustomerOrder { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "customer_orders" }; + + string customer_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + foreign_key: "customers on customer_id" + }]; + string order_id = 2 
[(sf.substreams.sink.sql.schema.v1.field) = { + foreign_key: "orders on order_id" + }]; +} +``` + +### Polymorphic Entities + +```protobuf +message Entity { + oneof entity_type { + Customer customer = 1; + Business business = 2; + Individual individual = 3; + } +} +``` + +## File Reference + +### Core Files +- `cmd/substreams-sink-sql/from_proto.go` - Main command implementation +- `proto/sf/substreams/sink/sql/schema/v1/schema.proto` - Schema annotation definitions +- `db_proto/sql/schema/schema.go` - Schema orchestration +- `db_proto/sql/schema/table.go` - Table creation logic +- `db_proto/sql/schema/column.go` - Column processing +- `proto/utils.go` - Annotation extraction utilities + +### Dialect Implementations +- `db_proto/sql/postgres/` - PostgreSQL dialect +- `db_proto/sql/risingwave/` - RisingWave dialect +- `db_proto/sql/click_house/` - ClickHouse dialect + +### Example Files +- `proto/test/relations/relations.proto` - Example schema definitions +- `db_proto/test/substreams/order/` - Complete test substream + +--- + +This documentation covers the complete from-proto functionality in substreams-sink-sql. For additional support, refer to the source code or open an issue in the project repository. \ No newline at end of file diff --git a/README.md b/README.md index e6c9ade..77a565a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Substreams:SQL Sink -The Substreams:SQL sink helps you quickly and easily sync Substreams modules to a PostgreSQL or Clickhouse database. +The Substreams:SQL sink helps you quickly and easily sync Substreams modules to a PostgreSQL, RisingWave, or ClickHouse database. ### Quickstart @@ -22,7 +22,19 @@ The Substreams:SQL sink helps you quickly and easily sync Substreams modules to docker compose up -d ``` - > You can wipe the database and restart from scratch by doing `docker compose down` and `rm -rf ./devel/data/postgres`. + This will start PostgreSQL, ClickHouse, and RisingWave services. 
Individual services can be started with: + ```bash + # Start only PostgreSQL + docker compose up -d postgres + + # Start only RisingWave + docker compose up -d risingwave + + # Start only ClickHouse + docker compose up -d database + ``` + + > You can wipe the databases and restart from scratch by doing `docker compose down` and `rm -rf ./devel/data/`. 1. Run the setup command: @@ -33,7 +45,16 @@ The Substreams:SQL sink helps you quickly and easily sync Substreams modules to substreams-sink-sql setup $DSN docs/tutorial/substreams.yaml ``` - **Clickhouse** + **RisingWave** + + ```bash + export DSN="risingwave://root:@localhost:4566/dev?schema=public" + substreams-sink-sql setup $DSN docs/tutorial/substreams.risingwave.yaml + ``` + + > **Note** RisingWave's dashboard is available at http://localhost:5691 when using Docker Compose. The default user for the playground mode is `root` with no password. + + **ClickHouse** ```bash export DSN="clickhouse://default:@localhost:9000/default" @@ -76,7 +97,7 @@ The [Substreams manifest in the tutorial](docs/tutorial/substreams.yaml#L37) def DSN stands for Data Source Name (or Database Source Name) and `substreams-sink-sql` expects a URL input that defines how to connect to the right driver. An example input for Postgres is `psql://dev-node:insecure-change-me-in-prod@localhost:5432/dev-node?sslmode=disable` which lists hostname, user, password, port and database (with some options) in a single string input. -The URL's scheme is used to determine the driver to use, `psql`, `clickhouse`, etc. In the example case above, the picked driver will be Postgres. The generic format of a DSN is of the form: +The URL's scheme is used to determine the driver to use, `psql`, `risingwave`, `clickhouse`, etc. In the example case above, the picked driver will be Postgres. The generic format of a DSN is of the form: ``` :://:@:/? 
@@ -109,9 +130,34 @@ Where `<options>` is URL query parameters in `<key>=<value>` format, multiple op
 Moreover, the `schema` option key can be used to select a particular schema within the `<dbname>` database.
 
+#### RisingWave
+
+The DSN format for RisingWave is:
+
+```
+risingwave://<user>:<password>@<host>:<port>/<database>[?<options>]
+```
+
+RisingWave is a PostgreSQL-compatible streaming database, so it uses similar connection parameters as PostgreSQL. The default port for RisingWave is typically `4566`. Supported options are similar to PostgreSQL since RisingWave implements the PostgreSQL wire protocol.
+
+**Example DSNs:**
+```bash
+# Local RisingWave instance
+risingwave://root:@localhost:4566/dev?schema=public
+
+# RisingWave with authentication
+risingwave://username:password@risingwave-host:4566/database?schema=substreams
+
+# RisingWave with SSL (if configured)
+risingwave://user:pass@host:4566/db?schema=public&sslmode=require
+```
+
+> [!NOTE]
+> RisingWave optimizes SQL for streaming workloads and provides real-time materialized views. While PostgreSQL-compatible, it uses RisingWave-specific data type mappings and SQL optimizations for better streaming performance.
+
 #### Others
 
-Only `psql` and `clickhouse` are supported today, adding support for a new _dialect_ is quite easy:
+Currently supported drivers are `psql` (PostgreSQL), `risingwave` (RisingWave), and `clickhouse` (ClickHouse). Adding support for a new _dialect_ is quite easy:
 
 - Copy [db/dialect_clickhouse.go](db_changes/db/dialect_clickhouse.go) to a new file `db/dialect_<name>.go` implementing the right functionality.
 - Update [`db.driverDialect` map](https://github.com/streamingfast/substreams-sink-sql/blob/develop/db/dialect.go#L27-L31) to add your dialect (key is the Golang type of your dialect implementation).
 
@@ -130,6 +176,59 @@ By convention, we name the `map` module that emits [sf.substreams.sink.database.
> Note that using prior versions (0.2.0, 0.1.\*) of `substreams-database-change`, you have to use `substreams.database.v1.DatabaseChanges` in your `substreams.yaml` and put the respected version of the `spkg` in your `substreams.yaml` +### RisingWave Integration + +RisingWave is a cloud-native streaming database designed for real-time analytics. It provides several advantages when used with Substreams: + +#### Streaming-First Architecture +- **Real-time Materialized Views**: RisingWave automatically maintains materialized views as new data arrives from Substreams +- **Incremental Computation**: Efficiently processes only new/changed data rather than recomputing entire datasets +- **SQL-based Stream Processing**: Use standard SQL to define complex analytics on streaming blockchain data + +#### Data Type Optimization +RisingWave uses optimized data types for blockchain data: +- `NUMERIC` for large unsigned integers (uint64) +- `TIMESTAMPTZ` for blockchain timestamps with timezone support +- `BYTEA` with hex encoding for blockchain addresses and hashes +- `VARCHAR` instead of `TEXT` for better performance on indexed string fields + +#### Transaction Limitations +⚠️ **Important**: RisingWave does not support traditional ACID transactions: +- **No read-write transactions**: All operations use autocommit mode +- **No rollback capability**: Each statement is immediately committed +- **Streaming-first design**: Optimized for append-only, event-driven workloads +- **PostgreSQL wire compatible**: Uses PostgreSQL protocol but with streaming semantics + +This makes RisingWave ideal for: +- ✅ Real-time analytics and metrics +- ✅ Event streaming and processing +- ✅ Append-heavy blockchain data ingestion +- ❌ Traditional OLTP applications requiring transactions + +#### Setup Example +```bash +# Start RisingWave (example with Docker) +docker run -d --name risingwave \ + -p 4566:4566 \ + -p 5691:5691 \ + risingwavelabs/risingwave:latest \ + playground + +# Run substreams-sink-sql 
with RisingWave +export DSN="risingwave://root:@localhost:4566/dev?schema=public" +substreams-sink-sql setup $DSN your-substreams.yaml +substreams-sink-sql run $DSN your-substreams.yaml +``` + +#### Performance Considerations +- RisingWave excels at **append-heavy workloads** typical in blockchain data +- **Materialized views** can pre-aggregate data for fast analytical queries +- **Horizontal scaling** is built-in for handling high-throughput Substreams +- Use **streaming joins** to combine data from multiple Substreams modules in real-time + +> [!TIP] +> For optimal performance with RisingWave, design your Substreams output to minimize updates and maximize inserts, leveraging RisingWave's streaming-first architecture. + ### Protobuf models - protobuf bindings are generated using `buf generate` at the root of this repo. See https://buf.build/docs/installation to install buf. @@ -145,10 +244,19 @@ The `substreams-sink-sql` contains a fast injection mechanism for cases where bi The idea is to first dump the Substreams data to `CSV` files using `substreams-sink-sql generate-csv` command: +**PostgreSQL:** ```bash substreams-sink-sql generate-csv "psql://dev-node:insecure-change-me-in-prod@localhost:5432/dev-node?sslmode=disable" --output-dir ./data/tables :14490000 ``` +**RisingWave:** +```bash +substreams-sink-sql generate-csv "risingwave://root:@localhost:4566/dev?schema=public" --output-dir ./data/tables :14490000 +``` + +> [!NOTE] +> RisingWave's streaming architecture makes it particularly well-suited for high-throughput injection scenarios. Its append-optimized design can handle large CSV imports efficiently while maintaining real-time query performance. + > [!NOTE] > We are using 14490000 as our stop block, pick you stop block close to chain's HEAD or smaller like us to perform an experiment, adjust to your needs. 
@@ -156,6 +264,7 @@ This will generate block segmented CSV files for each table in your schema insid We offer `substreams-sink-sql inject-csv` command as a convenience. It's a per table invocation but feel free to run each table concurrently, your are bound by your database as this point, so it's up to you to decide you much concurrency you want to use. Here a small `Bash` command to loop through all tables and inject them all +**PostgreSQL:** ```bash for i in `ls ./data/tables | grep -v state.yaml`; do \ substreams-sink-sql inject-csv "psql://dev-node:insecure-change-me-in-prod@localhost:5432/dev-node?sslmode=disable" ./data/tables "$i" :14490000; \ @@ -163,6 +272,14 @@ for i in `ls ./data/tables | grep -v state.yaml`; do \ done ``` +**RisingWave:** +```bash +for i in `ls ./data/tables | grep -v state.yaml`; do \ + substreams-sink-sql inject-csv "risingwave://root:@localhost:4566/dev?schema=public" ./data/tables "$i" :14490000; \ + if [[ $? != 0 ]]; then break; fi; \ +done +``` + Those files are then inserted in the database efficiently by doing a `COPY FROM` and reading the data from a network pipe directly. The command above will also pick up the `cursors` table injection as it's a standard table to write. The table is a bit special as it contains a single file which is contains the `cursor` that will handoff between CSV injection and going back to "live" blocks. It's extremely important that you validate that this table has been properly populated. You can do this simply by doing: diff --git a/SEMANTIC_TYPES.md b/SEMANTIC_TYPES.md new file mode 100644 index 0000000..df5d07a --- /dev/null +++ b/SEMANTIC_TYPES.md @@ -0,0 +1,394 @@ +# Semantic Type Annotations for SQL Schema Generation + +## Overview + +The semantic type annotation system allows you to specify high-level semantic meanings for protobuf fields that get automatically mapped to optimal SQL types for each database dialect. 
This enables support for specialized types like RisingWave's `rw_int256` while maintaining compatibility across PostgreSQL, RisingWave, and ClickHouse. + +## Quick Start + +1. Import the schema annotations in your protobuf: +```protobuf +import "sf/substreams/sink/sql/schema/v1/schema.proto"; +``` + +2. Add semantic type annotations to your fields: +```protobuf +message EthereumTransaction { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "eth_transactions" }; + + string hash = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true, + semantic_type: "hash" // Optimized hash storage + }]; + + string value = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", // Uses RisingWave's rw_int256 + format_hint: "decimal" + }]; + + string from_address = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" // Blockchain address format + }]; +} +``` + +## Supported Semantic Types + +### Blockchain/Crypto Types + +| Semantic Type | Description | RisingWave | PostgreSQL | ClickHouse | +|---------------|-------------|------------|------------|------------| +| `uint256` | 256-bit unsigned integer | `rw_uint256` | `NUMERIC(78,0)` | `String` | +| `int256` | 256-bit signed integer | `rw_int256` | `NUMERIC(78,0)` | `String` | +| `address` | Blockchain address (42 chars) | `CHARACTER VARYING` | `CHAR(42)` | `FixedString(42)` | +| `hash` | Cryptographic hash (66 chars) | `CHARACTER VARYING` | `CHAR(66)` | `FixedString(66)` | +| `signature` | Cryptographic signature | `CHARACTER VARYING` | `VARCHAR` | `String` | +| `pubkey` | Public key | `CHARACTER VARYING` | `VARCHAR` | `String` | + + +### Text/Binary Types + +| Semantic Type | Description | RisingWave | PostgreSQL | ClickHouse | +|---------------|-------------|------------|------------|------------| +| `hex` | Hexadecimal string | `CHARACTER VARYING` | `VARCHAR` | `String` | +| `base64` | Base64 encoded data | `CHARACTER VARYING` | `TEXT` | `String` | +| `json` | 
JSON structured data | `JSONB` | `JSONB` | `String` | +| `uuid` | UUID identifier | `CHARACTER VARYING` | `UUID` | `String` | + +### Time Types + +| Semantic Type | Description | RisingWave | PostgreSQL | ClickHouse | +|---------------|-------------|------------|------------|------------| +| `unix_timestamp` | Unix timestamp (seconds) | `TIMESTAMP WITH TIME ZONE` | `TIMESTAMP WITH TIME ZONE` | `DateTime` | +| `unix_timestamp_ms` | Unix timestamp (milliseconds) | `TIMESTAMP WITH TIME ZONE` | `TIMESTAMP WITH TIME ZONE` | `DateTime64(3)` | +| `block_timestamp` | Blockchain timestamp | `TIMESTAMP WITH TIME ZONE` | `TIMESTAMP WITH TIME ZONE` | `DateTime` | + +## Format Hints + +Format hints provide additional guidance for value conversion: + +| Format Hint | Description | Usage | +|-------------|-------------|-------| +| `hex` | Hexadecimal format | For `uint256`, `int256` fields containing hex strings | +| `decimal` | Decimal format | For numeric fields containing decimal strings | +| `base64` | Base64 format | For binary data encoded as base64 | +| `string` | String format | Default string handling | + +## Complete Example + +```protobuf +syntax = "proto3"; +import "sf/substreams/sink/sql/schema/v1/schema.proto"; + +message EthereumTransaction { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "eth_transactions" }; + + // Hash fields - optimized storage + string tx_hash = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true, + semantic_type: "hash" + }]; + + string block_hash = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "hash" + }]; + + // Large integers - uses rw_int256 in RisingWave + string value = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "decimal" + }]; + + string gas_price = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "hex" + }]; + + // Addresses - validated format + string from_address = 5 
[(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + string to_address = 6 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // Large token amounts - use uint256 for full precision + string token_amount = 7 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "decimal" + }]; + + // Timestamps + int64 block_timestamp = 9 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "unix_timestamp" + }]; + + // JSON metadata + string metadata = 10 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "json" + }]; + + // UUID tracking + string trace_id = 11 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uuid" + }]; +} +``` + +## Generated SQL Examples + +### RisingWave Output +```sql +CREATE TABLE eth_transactions ( + tx_hash CHARACTER VARYING, -- hash semantic type + block_hash CHARACTER VARYING, -- hash semantic type + value rw_uint256, -- uint256 → rw_uint256 (RisingWave-specific) + gas_price rw_uint256, -- uint256 → rw_uint256 + from_address CHARACTER VARYING, -- address semantic type + to_address CHARACTER VARYING, -- address semantic type + token_amount rw_uint256, -- uint256 semantic type + block_timestamp TIMESTAMP WITH TIME ZONE, -- unix_timestamp + metadata JSONB, -- json semantic type + trace_id CHARACTER VARYING -- uuid semantic type +); + +-- Sample insert with rw_int256 casting +INSERT INTO eth_transactions VALUES ( + '0x1234...abcd', + '0x5678...efab', + '115792089237316195423570985008687907853269984665640564039457584007913129639935'::rw_uint256, + '0x1bc16d674ec80000'::rw_uint256, + '0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e', + '0x8ba1f109551bD432803012645Hac136c5ae5c9e6', + '1000123456789012345678'::rw_uint256, + '2024-01-01 00:00:00+00', + '{"type": "transfer"}'::jsonb, + '550e8400-e29b-41d4-a716-446655440000' +); +``` + +### PostgreSQL Output +```sql +CREATE TABLE eth_transactions ( + tx_hash CHAR(66), -- hash semantic 
type
+    block_hash CHAR(66),            -- hash semantic type
+    value NUMERIC(78,0),            -- uint256 → NUMERIC fallback
+    gas_price NUMERIC(78,0),        -- uint256 → NUMERIC fallback
+    from_address CHAR(42),          -- address semantic type
+    to_address CHAR(42),            -- address semantic type
+    token_amount NUMERIC(78,0),     -- uint256 → NUMERIC fallback
+    block_timestamp TIMESTAMP WITH TIME ZONE, -- unix_timestamp
+    metadata JSONB,                 -- json semantic type
+    trace_id UUID                   -- uuid → PostgreSQL UUID type
+);
+```
+
+### ClickHouse Output
+```sql
+CREATE TABLE eth_transactions (
+    tx_hash FixedString(66),        -- hash semantic type
+    block_hash FixedString(66),     -- hash semantic type
+    value String,                   -- uint256 → String (no native UInt256)
+    gas_price String,               -- uint256 → String (no native UInt256)
+    from_address FixedString(42),   -- address semantic type
+    to_address FixedString(42),     -- address semantic type
+    token_amount String,            -- uint256 semantic type
+    block_timestamp DateTime,       -- unix_timestamp
+    metadata String,                -- json → String fallback
+    trace_id String                 -- uuid → String fallback
+) ENGINE = ReplacingMergeTree(version);
+```
+
+## Value Conversion Examples
+
+### RisingWave rw_uint256 and rw_int256 Conversion
+
+**Input Values:**
+```protobuf
+// In your protobuf data
+value: "115792089237316195423570985008687907853269984665640564039457584007913129639935"
+gas_price: "0x1bc16d674ec80000"
+```
+
+**Generated SQL:**
+```sql
+-- Decimal format for uint256
+INSERT INTO table VALUES ('115792089237316195423570985008687907853269984665640564039457584007913129639935'::rw_uint256);
+
+-- Hex format for uint256
+INSERT INTO table VALUES ('0x1bc16d674ec80000'::rw_uint256);
+
+-- Signed values use rw_int256
+INSERT INTO table VALUES ('-12345'::rw_int256);
+```
+
+### Address Validation
+
+**Valid Formats:**
+```protobuf
+from_address: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e"  // 42 chars with 0x
+to_address: "742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e"      // 40 chars, 0x added automatically
+```
+
+**Generated SQL:**
+```sql
+INSERT INTO table VALUES ( + '0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e', + '0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e' +); +``` + +### Timestamp Conversion + +**Input Values:** +```protobuf +block_timestamp: 1640995200 // Unix timestamp in seconds +updated_at_ms: 1640995200000 // Unix timestamp in milliseconds +``` + +**Generated SQL:** +```sql +INSERT INTO table VALUES ( + '2022-01-01 00:00:00+00', -- Converted from unix timestamp + '2022-01-01 00:00:00+00' -- Converted from unix timestamp ms +); +``` + +## Migration from Existing Schemas + +### Step 1: Add Semantic Types Gradually +```protobuf +message Transaction { + // Existing field without semantic type + string hash = 1; + + // New field with semantic type + string block_hash = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "hash" + }]; + + // Existing large number field + string value = 3; + + // Updated with semantic type for better performance + string gas_price = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "hex" + }]; +} +``` + +### Step 2: Update All Fields +```protobuf +message Transaction { + // All fields now use semantic types + string hash = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true, + semantic_type: "hash" + }]; + + string block_hash = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "hash" + }]; + + string value = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "decimal" + }]; + + string gas_price = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "hex" + }]; +} +``` + +## Best Practices + +### 1. 
Choose Appropriate Semantic Types
+- Use `uint256`/`int256` for large blockchain values that need arithmetic operations or full 256-bit precision
+- Use `address` for blockchain addresses to get validation and optimal storage
+- Use `hash` for fixed-length hashes (transaction hashes, block hashes)
+- Use `json` for structured metadata and `uuid` for identifiers to benefit from native database types where supported
+
+### 2. Use Format Hints Consistently
+- Add `format_hint: "hex"` for fields containing hexadecimal strings
+- Add `format_hint: "decimal"` for fields containing decimal number strings
+- Consistent format hints help with validation and conversion
+
+### 3. Leverage Database-Specific Features
+- **RisingWave**: Use `uint256`/`int256` semantic types to leverage `rw_uint256`/`rw_int256` for efficient 256-bit arithmetic
+- **PostgreSQL**: Semantic types map to optimized native types like `UUID`, `JSONB`, and `NUMERIC` with proper precision
+- **ClickHouse**: Uses `FixedString` and `Decimal` types for optimal performance; 256-bit integers fall back to `String`
+- All dialects gracefully handle unsupported semantic types with appropriate fallbacks
+
+### 4. Plan for Multi-Dialect Deployment
+- Test your schema generation across all target dialects
+- Verify that fallback types meet your precision and performance requirements
+- Consider dialect-specific optimizations for high-volume data
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Semantic type not recognized**
+   - Ensure you've imported `sf/substreams/sink/sql/schema/v1/schema.proto`
+   - Check that the semantic type name matches exactly (case-sensitive)
+
+2. **Value conversion errors**
+   - Verify format hints match your data format (`hex` vs `decimal`)
+   - Check that address formats are valid (40 hex chars or 42 with 0x prefix)
+   - Ensure timestamp values are valid Unix timestamps
+
+3. 
**Type fallback unexpected** + - Check if the dialect supports the semantic type + - RisingWave supports more specialized types than PostgreSQL/ClickHouse + - Review the semantic type mapping table above + +### Debug Commands + +```bash +# Test schema generation +substreams-sink-sql from-proto --help + +# Validate protobuf syntax +buf lint proto/ + +# Test with specific dialect +substreams-sink-sql from-proto "postgresql://..." manifest.yaml +substreams-sink-sql from-proto "risingwave://..." manifest.yaml +substreams-sink-sql from-proto "clickhouse://..." manifest.yaml +``` + +## Performance Impact + +### RisingWave Benefits +- `rw_uint256` and `rw_int256` provide native 256-bit arithmetic operations +- Proper unsigned/signed type distinction for accurate mathematical operations +- Optimized storage for large integers compared to string fallbacks +- Better query performance for mathematical operations on blockchain data + +### PostgreSQL Benefits +- Native `UUID` type for efficient UUID operations and indexing +- `JSONB` for structured data queries with GIN indexing support +- Fixed-length `CHAR` types for blockchain addresses and hashes provide storage optimization +- Proper `NUMERIC` precision prevents overflow issues with large numbers + +### ClickHouse Benefits +- `FixedString` types provide optimal storage for fixed-length data like addresses and hashes +- Specialized `Decimal` types with configurable precision for financial calculations +- `String` type for large integers (256-bit values) with efficient columnar compression +- Columnar storage optimizations work best with proper type selection + +### Storage Optimization +- `address` and `hash` types use fixed-length storage where supported across all dialects +- Precision decimal types prevent unnecessary precision overhead +- JSON types enable efficient structured data queries (JSONB in PostgreSQL/RisingWave) + +### Query Performance +- Semantic types enable database-specific optimizations across all 
supported dialects +- Proper type selection improves index performance +- Reduced type conversion overhead in queries + +This semantic type system provides a powerful way to leverage database-specific features like RisingWave's `rw_int256` while maintaining broad compatibility across different SQL databases. \ No newline at end of file diff --git a/cmd/substreams-sink-sql/from_proto.go b/cmd/substreams-sink-sql/from_proto.go index accd3d1..dc18463 100644 --- a/cmd/substreams-sink-sql/from_proto.go +++ b/cmd/substreams-sink-sql/from_proto.go @@ -15,6 +15,7 @@ import ( protosql "github.com/streamingfast/substreams-sink-sql/db_proto/sql" clickhouse "github.com/streamingfast/substreams-sink-sql/db_proto/sql/click_house" "github.com/streamingfast/substreams-sink-sql/db_proto/sql/postgres" + "github.com/streamingfast/substreams-sink-sql/db_proto/sql/risingwave" schema2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema" stats2 "github.com/streamingfast/substreams-sink-sql/db_proto/stats" pbsql "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/services/v1" @@ -204,6 +205,12 @@ func fromProtoE(cmd *cobra.Command, args []string) error { return fmt.Errorf("creating postgres database: %w", err) } + case "risingwave": + database, err = risingwave.NewDatabase(schema, dsn, outputModuleName, rootMessageDescriptor, useProtoOption, useConstraints, zlog) + if err != nil { + return fmt.Errorf("creating risingwave database: %w", err) + } + case "clickhouse": database, err = clickhouse.NewDatabase( cmd.Context(), @@ -247,6 +254,9 @@ func fromProtoE(cmd *cobra.Command, args []string) error { } err = database.CommitTransaction() + if err != nil { + return fmt.Errorf("commit transaction: %w", err) + } } else { migrationNeeded := sinkInfo.SchemaHash != database.GetDialect().SchemaHash() @@ -317,4 +327,4 @@ func fromProtoE(cmd *cobra.Command, args []string) error { fmt.Println("Goodbye") return nil -} +} \ No newline at end of file diff --git 
a/db_changes/db/db.go b/db_changes/db/db.go index 294d34b..8cf6d32 100644 --- a/db_changes/db/db.go +++ b/db_changes/db/db.go @@ -61,12 +61,12 @@ func NewLoader( tracer logging.Tracer, ) (*Loader, error) { - sqlDB, err := sql.Open(dsn.Driver(), dsn.ConnString()) + sqlDB, err := sql.Open(dsn.SqlDriver(), dsn.ConnString()) if err != nil { return nil, fmt.Errorf("open db connection: %w", err) } - dialect, err := newDialect(sqlDB.Driver(), dsn.Schema(), cursorTableName, historyTableName, clickhouseCluster) + dialect, err := newDialect(dsn, sqlDB.Driver(), dsn.Schema(), cursorTableName, historyTableName, clickhouseCluster) if err != nil { return nil, fmt.Errorf("get dialect: %w", err) } @@ -113,15 +113,28 @@ func NewLoader( return l, nil } -func newDialect(driver driver.Driver, schemaName string, cursorTableName string, historyTableName string, clickHouseClusterName string) (Dialect, error) { +func newDialect(dsn *DSN, driver driver.Driver, schemaName string, cursorTableName string, historyTableName string, clickHouseClusterName string) (Dialect, error) { + // Use DSN driver name first, fallback to SQL driver type for unknown DSN drivers + dsnDriver := dsn.Driver() driverType := fmt.Sprintf("%T", driver) - switch driverType { - case "*pq.Driver": + + switch dsnDriver { + case "postgres": return NewPostgresDialect(schemaName, cursorTableName, historyTableName), nil - case "*clickhouse.stdDriver": + case "risingwave": + return NewRisingwaveDialect(schemaName, cursorTableName, historyTableName), nil + case "clickhouse": return NewClickhouseDialect(schemaName, cursorTableName, clickHouseClusterName), nil default: - return nil, fmt.Errorf("unsupported driver: %s", driverType) + // Fallback to driver type for unknown DSN drivers + switch driverType { + case "*pq.Driver": + return NewPostgresDialect(schemaName, cursorTableName, historyTableName), nil + case "*clickhouse.stdDriver": + return NewClickhouseDialect(schemaName, cursorTableName, clickHouseClusterName), nil + 
default: + return nil, fmt.Errorf("unsupported driver: %s (dsn: %s)", driverType, dsnDriver) + } } } @@ -140,7 +153,56 @@ func (l *Loader) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) { if l.testTx != nil { return l.testTx, nil } - return l.DB.BeginTx(ctx, opts) + + // RisingWave-specific behavior: RisingWave does not support read-write transactions + // According to RisingWave docs: "The BEGIN command starts the read-write transaction mode, + // which is not supported yet in RisingWave. For compatibility reasons, this command will + // still succeed but no transaction is actually started." + // Therefore, we use autocommit mode for all operations. + if l.dsn.Driver() == "risingwave" { + l.logger.Debug("RisingWave: using autocommit mode instead of transactions") + return &RisingWaveAutocommitTx{ + conn: l.DB, + logger: l.logger, + }, nil + } + + tx, err := l.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + + return tx, nil +} + +// RisingWaveAutocommitTx implements the Tx interface for RisingWave's autocommit mode. +// RisingWave does not support read-write transactions, so all operations are executed +// directly against the database connection in autocommit mode. +type RisingWaveAutocommitTx struct { + conn *sql.DB + logger *zap.Logger +} + +func (tx *RisingWaveAutocommitTx) Rollback() error { + // RisingWave operates in autocommit mode. Rollback is not supported/needed + // since each statement is automatically committed. + tx.logger.Debug("RisingWave: rollback is no-op in autocommit mode") + return nil +} + +func (tx *RisingWaveAutocommitTx) Commit() error { + // RisingWave operates in autocommit mode. Commit is not needed + // since each statement is automatically committed. + tx.logger.Debug("RisingWave: commit is no-op in autocommit mode") + return nil +} + +func (tx *RisingWaveAutocommitTx) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + return tx.conn.ExecContext(ctx, query, args...) 
+} + +func (tx *RisingWaveAutocommitTx) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + return tx.conn.QueryContext(ctx, query, args...) } func (l *Loader) BatchBlockFlushInterval() int { diff --git a/db_changes/db/dialect_clickhouse.go b/db_changes/db/dialect_clickhouse.go index c6028fb..79173d1 100644 --- a/db_changes/db/dialect_clickhouse.go +++ b/db_changes/db/dialect_clickhouse.go @@ -67,10 +67,6 @@ func (d ClickhouseDialect) Flush(tx Tx, ctx context.Context, l *Loader, outputMo for entryPair := entries.Oldest(); entryPair != nil; entryPair = entryPair.Next() { entry := entryPair.Value - if err != nil { - return entryCount, fmt.Errorf("failed to get query: %w", err) - } - if l.tracer.Enabled() { l.logger.Debug("adding query from operation to transaction", zap.Stringer("op", entry), zap.String("query", query)) } diff --git a/db_changes/db/dialect_risingwave.go b/db_changes/db/dialect_risingwave.go new file mode 100644 index 0000000..a16fd29 --- /dev/null +++ b/db_changes/db/dialect_risingwave.go @@ -0,0 +1,521 @@ +package db + +import ( + "context" + "crypto/sha256" + "database/sql" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/streamingfast/cli" + sink "github.com/streamingfast/substreams-sink" + "go.uber.org/zap" + "golang.org/x/exp/maps" +) + +type RisingwaveDialect struct { + cursorTableName string + historyTableName string + schemaName string +} + +func NewRisingwaveDialect(schemaName string, cursorTableName string, historyTableName string) *RisingwaveDialect { + return &RisingwaveDialect{ + cursorTableName: cursorTableName, + historyTableName: historyTableName, + schemaName: schemaName, + } +} + +func (d RisingwaveDialect) Revert(tx Tx, ctx context.Context, l *Loader, lastValidFinalBlock uint64) error { + query := fmt.Sprintf(`SELECT op,table_name,pk,prev_value,block_num FROM %s WHERE "block_num" > %d ORDER BY "block_num" DESC`, + d.historyTable(d.schemaName), + 
lastValidFinalBlock, + ) + + rows, err := tx.QueryContext(ctx, query) + if err != nil { + return err + } + + var reversions []func() error + l.logger.Info("reverting forked block block(s)", zap.Uint64("last_valid_final_block", lastValidFinalBlock)) + if rows != nil { // rows will be nil with no error only in testing scenarios + defer rows.Close() + for rows.Next() { + var op string + var table_name string + var pk string + var prev_value_nullable sql.NullString + var block_num uint64 + if err := rows.Scan(&op, &table_name, &pk, &prev_value_nullable, &block_num); err != nil { + return fmt.Errorf("scanning row: %w", err) + } + l.logger.Debug("reverting", zap.String("operation", op), zap.String("table_name", table_name), zap.String("pk", pk), zap.Uint64("block_num", block_num)) + prev_value := prev_value_nullable.String + + // we can't call revertOp inside this loop, because it calls tx.ExecContext, + // which can't run while this query is "active" or it will silently discard the remaining rows! 
+ reversions = append(reversions, func() error { + if err := d.revertOp(tx, ctx, op, table_name, pk, prev_value, block_num); err != nil { + return fmt.Errorf("revertOp: %w", err) + } + return nil + }) + } + if err := rows.Err(); err != nil { + return fmt.Errorf("iterating on rows from query %q: %w", query, err) + } + for _, reversion := range reversions { + if err := reversion(); err != nil { + return fmt.Errorf("execution revert operation: %w", err) + } + } + } + pruneHistory := fmt.Sprintf(`DELETE FROM %s WHERE "block_num" > %d;`, + d.historyTable(d.schemaName), + lastValidFinalBlock, + ) + + _, err = tx.ExecContext(ctx, pruneHistory) + if err != nil { + return fmt.Errorf("executing pruneHistory: %w", err) + } + return nil +} + +func (d RisingwaveDialect) Flush(tx Tx, ctx context.Context, l *Loader, outputModuleHash string, lastFinalBlock uint64) (int, error) { + var rowCount int + for entriesPair := l.entries.Oldest(); entriesPair != nil; entriesPair = entriesPair.Next() { + tableName := entriesPair.Key + entries := entriesPair.Value + + if l.tracer.Enabled() { + l.logger.Debug("flushing table rows", zap.String("table_name", tableName), zap.Int("row_count", entries.Len())) + } + for entryPair := entries.Oldest(); entryPair != nil; entryPair = entryPair.Next() { + entry := entryPair.Value + + query, err := d.prepareStatement(d.schemaName, entry) + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + + if l.tracer.Enabled() { + l.logger.Debug("adding query from operation to transaction", zap.Stringer("op", entry), zap.String("query", query)) + } + + if _, err := tx.ExecContext(ctx, query); err != nil { + return 0, fmt.Errorf("executing query %q: %w", query, err) + } + } + rowCount += entries.Len() + } + + if err := d.pruneReversibleSegment(tx, ctx, d.schemaName, lastFinalBlock); err != nil { + return 0, err + } + + return rowCount, nil +} + +func (d RisingwaveDialect) revertOp(tx Tx, ctx context.Context, op, escaped_table_name, pk, 
prev_value string, block_num uint64) error { + + pkmap := make(map[string]string) + if err := json.Unmarshal([]byte(pk), &pkmap); err != nil { + return fmt.Errorf("revertOp: unmarshalling %q: %w", pk, err) + } + switch op { + case "I": + query := fmt.Sprintf(`DELETE FROM %s WHERE %s;`, + escaped_table_name, + getPrimaryKeyWhereClause(pkmap, ""), + ) + if _, err := tx.ExecContext(ctx, query); err != nil { + return fmt.Errorf("executing revert query %q: %w", query, err) + } + case "D": + query := fmt.Sprintf(`INSERT INTO %s SELECT * FROM jsonb_populate_record(null::%s,%s);`, + escaped_table_name, + escaped_table_name, + escapeStringValue(prev_value), + ) + if _, err := tx.ExecContext(ctx, query); err != nil { + return fmt.Errorf("executing revert query %q: %w", query, err) + } + + case "U": + columns, err := sqlColumnNamesFromJSON(prev_value) + if err != nil { + return err + } + + query := fmt.Sprintf(`UPDATE %s SET(%s)=((SELECT %s FROM jsonb_populate_record(null::%s,%s))) WHERE %s;`, + escaped_table_name, + columns, + columns, + escaped_table_name, + escapeStringValue(prev_value), + getPrimaryKeyWhereClause(pkmap, ""), + ) + if _, err := tx.ExecContext(ctx, query); err != nil { + return fmt.Errorf("executing revert query %q: %w", query, err) + } + default: + panic("invalid op in revert command") + } + return nil +} + +func (d RisingwaveDialect) pruneReversibleSegment(tx Tx, ctx context.Context, schema string, highestFinalBlock uint64) error { + query := fmt.Sprintf(`DELETE FROM %s WHERE block_num <= %d;`, d.historyTable(schema), highestFinalBlock) + if _, err := tx.ExecContext(ctx, query); err != nil { + return fmt.Errorf("executing prune query %q: %w", query, err) + } + return nil +} + +func (d RisingwaveDialect) GetCreateCursorQuery(schema string, withPostgraphile bool) string { + out := fmt.Sprintf(cli.Dedent(` + create table if not exists %s.%s + ( + id varchar not null constraint %s primary key, + cursor varchar, + block_num bigint, + block_id varchar + ); + 
`), EscapeIdentifier(schema), EscapeIdentifier(d.cursorTableName), EscapeIdentifier(d.cursorTableName+"_pk")) + if withPostgraphile { + out += fmt.Sprintf("COMMENT ON TABLE %s.%s IS E'@omit';", + EscapeIdentifier(schema), EscapeIdentifier(d.cursorTableName)) + } + return out +} + +func (d RisingwaveDialect) GetCreateHistoryQuery(schema string, withPostgraphile bool) string { + out := fmt.Sprintf(cli.Dedent(` + create table if not exists %s + ( + id BIGINT NOT NULL, + op CHARACTER VARYING, + table_name varchar, + pk varchar, + prev_value varchar, + block_num bigint, + PRIMARY KEY (id) + ); + `), + d.historyTable(schema), + ) + if withPostgraphile { + out += fmt.Sprintf("COMMENT ON TABLE %s.%s IS E'@omit';", + EscapeIdentifier(schema), EscapeIdentifier(d.historyTableName)) + } + return out +} + +func (d RisingwaveDialect) ExecuteSetupScript(ctx context.Context, l *Loader, schemaSql string) error { + if _, err := l.ExecContext(ctx, schemaSql); err != nil { + return fmt.Errorf("exec schemaName: %w", err) + } + return nil +} + +func (d RisingwaveDialect) GetUpdateCursorQuery(table, moduleHash string, cursor *sink.Cursor, block_num uint64, block_id string) string { + return query(` + UPDATE %s set "cursor" = '%s', block_num = %d, block_id = '%s' WHERE id = '%s'; + `, table, cursor, block_num, block_id, moduleHash) +} + +func (d RisingwaveDialect) GetAllCursorsQuery(table string) string { + return fmt.Sprintf("SELECT id, cursor, block_num, block_id FROM %s", table) +} + +func (d RisingwaveDialect) ParseDatetimeNormalization(value string) string { + return escapeStringValue(value) +} + +func (d RisingwaveDialect) DriverSupportRowsAffected() bool { + return true +} + +func (d RisingwaveDialect) OnlyInserts() bool { + return false +} + +func (d RisingwaveDialect) AllowPkDuplicates() bool { + return false +} + +func (d RisingwaveDialect) CreateUser(tx Tx, ctx context.Context, l *Loader, username string, password string, database string, readOnly bool) error { + user, pass, db 
:= EscapeIdentifier(username), password, EscapeIdentifier(database) + var q string + if readOnly { + q = fmt.Sprintf(` + CREATE ROLE %s LOGIN PASSWORD '%s'; + GRANT CONNECT ON DATABASE %s TO %s; + GRANT USAGE ON SCHEMA public TO %s; + ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO %s; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO %s; + `, user, pass, db, user, user, user, user) + } else { + q = fmt.Sprintf("CREATE USER %s WITH PASSWORD '%s'; GRANT ALL PRIVILEGES ON DATABASE %s TO %s;", user, pass, db, user) + } + + _, err := tx.ExecContext(ctx, q) + if err != nil { + return fmt.Errorf("executing query %q: %w", q, err) + } + + return nil +} + +func (d RisingwaveDialect) historyTable(schema string) string { + return fmt.Sprintf("%s.%s", EscapeIdentifier(schema), EscapeIdentifier(d.historyTableName)) +} + +// generateHistoryID creates a unique ID for history table entries +// using block number and a hash of the operation details +func (d RisingwaveDialect) generateHistoryID(blockNum uint64, op, pk string) int64 { + // Create a hash from the operation details to ensure uniqueness within a block + hasher := sha256.New() + hasher.Write([]byte(fmt.Sprintf("%s:%s:%d", op, pk, time.Now().UnixNano()))) + hash := hasher.Sum(nil) + + // Use first 4 bytes of hash as a 32-bit number + hashInt := int64(hash[0])<<24 | int64(hash[1])<<16 | int64(hash[2])<<8 | int64(hash[3]) + + // Combine block number (shifted left) with hash to ensure global uniqueness + // Block number in high bits, hash in low bits + return (int64(blockNum) << 32) | (hashInt & 0xFFFFFFFF) +} + +func (d RisingwaveDialect) saveInsert(schema string, table string, primaryKey map[string]string, blockNum uint64) string { + // Generate unique ID using block number and hash of primary key + id := d.generateHistoryID(blockNum, "I", primaryKeyToJSON(primaryKey)) + return fmt.Sprintf(`INSERT INTO %s (id,op,table_name,pk,block_num) values (%d,%s,%s,%s,%d);`, + d.historyTable(schema), + id, + 
escapeStringValue("I"), + escapeStringValue(table), + escapeStringValue(primaryKeyToJSON(primaryKey)), + blockNum, + ) +} + +/* +with t as (select 'default' id) +select CASE WHEN block_meta.id is null THEN 'I' ELSE 'U' END AS op, '"public"."block_meta"', 'allo', row_to_json(block_meta),10 from t left join block_meta on block_meta.id='default'; +*/ +func (d RisingwaveDialect) saveUpsert(schema string, escapedTableName string, primaryKey map[string]string, blockNum uint64) string { + schemaAndTable := fmt.Sprintf("%s.%s", EscapeIdentifier(schema), escapedTableName) + // Generate unique ID for this upsert operation + id := d.generateHistoryID(blockNum, "U", primaryKeyToJSON(primaryKey)) + + return fmt.Sprintf(` + WITH t as (select %s) + INSERT INTO %s (id,op,table_name,pk,prev_value,block_num) + SELECT %d, CASE WHEN %s THEN 'I' ELSE 'U' END AS op, %s, %s, to_jsonb(%s),%d from t left join %s.%s on %s;`, + + getPrimaryKeyFakeEmptyValues(primaryKey), + d.historyTable(schema), + id, + + getPrimaryKeyFakeEmptyValuesAssertion(primaryKey, escapedTableName), + + escapeStringValue(schemaAndTable), escapeStringValue(primaryKeyToJSON(primaryKey)), escapedTableName, blockNum, + EscapeIdentifier(schema), escapedTableName, + getPrimaryKeyWhereClause(primaryKey, escapedTableName), + ) + +} + +func (d RisingwaveDialect) saveUpdate(schema string, escapedTableName string, primaryKey map[string]string, blockNum uint64) string { + return d.saveRow("U", schema, escapedTableName, primaryKey, blockNum) +} + +func (d RisingwaveDialect) saveDelete(schema string, escapedTableName string, primaryKey map[string]string, blockNum uint64) string { + return d.saveRow("D", schema, escapedTableName, primaryKey, blockNum) +} + +func (d RisingwaveDialect) saveRow(op, schema, escapedTableName string, primaryKey map[string]string, blockNum uint64) string { + schemaAndTable := fmt.Sprintf("%s.%s", EscapeIdentifier(schema), escapedTableName) + // Generate unique ID for this operation + id := 
d.generateHistoryID(blockNum, op, primaryKeyToJSON(primaryKey)) + return fmt.Sprintf(`INSERT INTO %s (id,op,table_name,pk,prev_value,block_num) SELECT %d,%s,%s,%s,to_jsonb(%s),%d FROM %s.%s WHERE %s;`, + d.historyTable(schema), + id, + escapeStringValue(op), escapeStringValue(schemaAndTable), escapeStringValue(primaryKeyToJSON(primaryKey)), escapedTableName, blockNum, + EscapeIdentifier(schema), escapedTableName, + getPrimaryKeyWhereClause(primaryKey, ""), + ) + +} + +func (d *RisingwaveDialect) prepareStatement(schema string, o *Operation) (string, error) { + var columns, values []string + if o.opType == OperationTypeInsert || o.opType == OperationTypeUpsert || o.opType == OperationTypeUpdate { + var err error + columns, values, err = d.prepareColValues(o.table, o.data) + if err != nil { + return "", fmt.Errorf("preparing column & values: %w", err) + } + } + + if o.opType == OperationTypeUpsert || o.opType == OperationTypeUpdate || o.opType == OperationTypeDelete { + // A table without a primary key set yield a `primaryKey` map with a single entry where the key is an empty string + if _, found := o.primaryKey[""]; found { + return "", fmt.Errorf("trying to perform %s operation but table %q don't have a primary key set, this is not accepted", o.opType, o.table.name) + } + } + + switch o.opType { + case OperationTypeInsert: + insertQuery := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s);", + o.table.identifier, + strings.Join(columns, ","), + strings.Join(values, ","), + ) + if o.reversibleBlockNum != nil { + return d.saveInsert(schema, o.table.identifier, o.primaryKey, *o.reversibleBlockNum) + insertQuery, nil + } + return insertQuery, nil + + case OperationTypeUpsert: + // RisingWave doesn't support PostgreSQL's ON CONFLICT syntax in INSERT statements + // However, since we control table creation, we ensure tables are created with ON CONFLICT OVERWRITE + // This means a simple INSERT will automatically handle conflicts by overwriting + insertQuery := 
fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s);", + o.table.identifier, + strings.Join(columns, ","), + strings.Join(values, ","), + ) + + if o.reversibleBlockNum != nil { + return d.saveUpsert(schema, o.table.nameEscaped, o.primaryKey, *o.reversibleBlockNum) + insertQuery, nil + } + return insertQuery, nil + + case OperationTypeUpdate: + updates := make([]string, len(columns)) + for i := 0; i < len(columns); i++ { + updates[i] = fmt.Sprintf("%s=%s", columns[i], values[i]) + } + + primaryKeySelector := getPrimaryKeyWhereClause(o.primaryKey, "") + + updateQuery := fmt.Sprintf("UPDATE %s SET %s WHERE %s", + o.table.identifier, + strings.Join(updates, ", "), + primaryKeySelector, + ) + + if o.reversibleBlockNum != nil { + return d.saveUpdate(schema, o.table.nameEscaped, o.primaryKey, *o.reversibleBlockNum) + updateQuery, nil + } + return updateQuery, nil + + case OperationTypeDelete: + primaryKeyWhereClause := getPrimaryKeyWhereClause(o.primaryKey, "") + deleteQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", + o.table.identifier, + primaryKeyWhereClause, + ) + if o.reversibleBlockNum != nil { + return d.saveDelete(schema, o.table.nameEscaped, o.primaryKey, *o.reversibleBlockNum) + deleteQuery, nil + } + return deleteQuery, nil + + default: + panic(fmt.Errorf("unknown operation type %q", o.opType)) + } +} + +func (d *RisingwaveDialect) prepareColValues(table *TableInfo, colValues map[string]string) (columns []string, values []string, err error) { + if len(colValues) == 0 { + return + } + + columns = make([]string, len(colValues)) + values = make([]string, len(colValues)) + + i := 0 + for colName := range colValues { + columns[i] = colName + i++ + } + sort.Strings(columns) // sorted for determinism in tests + + for i, columnName := range columns { + value := colValues[columnName] + columnInfo, found := table.columnsByName[columnName] + if !found { + return nil, nil, fmt.Errorf("cannot find column %q for table %q (valid columns are %q)", columnName, table.identifier, 
strings.Join(maps.Keys(table.columnsByName), ", ")) + } + + normalizedValue, err := d.normalizeValueType(value, columnInfo.scanType) + if err != nil { + return nil, nil, fmt.Errorf("getting sql value from table %s for column %q raw value %q: %w", table.identifier, columnName, value, err) + } + + values[i] = normalizedValue + columns[i] = columnInfo.escapedName // escape the column name + } + return +} + +// Format based on type, value returned unescaped +func (d *RisingwaveDialect) normalizeValueType(value string, valueType reflect.Type) (string, error) { + switch valueType.Kind() { + case reflect.String: + // replace unicode null character with empty string + value = strings.ReplaceAll(value, "\u0000", "") + return escapeStringValue(value), nil + + // BYTES in Postgres must be escaped, we receive a Vec from substreams + case reflect.Slice: + return escapeStringValue(value), nil + + case reflect.Bool: + return fmt.Sprintf("'%s'", value), nil + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return value, nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return value, nil + + case reflect.Float32, reflect.Float64: + return value, nil + + case reflect.Struct: + if valueType == reflectTypeTime { + if integerRegex.MatchString(value) { + i, err := strconv.Atoi(value) + if err != nil { + return "", fmt.Errorf("could not convert %s to int: %w", value, err) + } + + return escapeStringValue(time.Unix(int64(i), 0).Format(time.RFC3339)), nil + } + + // It's a plain string, parse by dialect it and pass it to the databaseName + return d.ParseDatetimeNormalization(value), nil + } + + return "", fmt.Errorf("unsupported struct type %s", valueType) + default: + // It's a column's type the schemaName parsing don't know how to represents as + // a Go type. In that case, we pass it unmodified to the databaseName engine. 
It + // will be the responsibility of the one sending the data to correctly represent + // it in the way accepted by the databaseName. + // + // In most cases, it going to just work. + return value, nil + } +} diff --git a/db_changes/db/dialect_risingwave_test.go b/db_changes/db/dialect_risingwave_test.go new file mode 100644 index 0000000..9fd5ad8 --- /dev/null +++ b/db_changes/db/dialect_risingwave_test.go @@ -0,0 +1,202 @@ +package db + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRisingwavePrimaryKeyToJSON(t *testing.T) { + tests := []struct { + name string + keys map[string]string + expect string + }{ + { + name: "single key", + keys: map[string]string{ + "id": "0xdeadbeef", + }, + expect: `{"id":"0xdeadbeef"}`, + }, + { + name: "two keys", + keys: map[string]string{ + "hash": "0xdeadbeef", + "idx": "5", + }, + expect: `{"hash":"0xdeadbeef","idx":"5"}`, + }, + { + name: "determinism", + keys: map[string]string{ + "bbb": "1", + "ccc": "2", + "aaa": "3", + "ddd": "4", + }, + expect: `{"aaa":"3","bbb":"1","ccc":"2","ddd":"4"}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + jsonKey := primaryKeyToJSON(test.keys) + assert.Equal(t, test.expect, jsonKey) + }) + } +} + +func TestRisingwaveJSONToPrimaryKey(t *testing.T) { + tests := []struct { + name string + in string + expect map[string]string + }{ + { + name: "single key", + in: `{"id":"0xdeadbeef"}`, + expect: map[string]string{ + "id": "0xdeadbeef", + }, + }, + { + name: "two keys", + in: `{"hash":"0xdeadbeef","idx":"5"}`, + expect: map[string]string{ + "hash": "0xdeadbeef", + "idx": "5", + }, + }, + { + name: "determinism", + in: `{"aaa":"3","bbb":"1","ccc":"2","ddd":"4"}`, + expect: map[string]string{ + "bbb": "1", + "ccc": "2", + "aaa": "3", + "ddd": "4", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + out, err := jsonToPrimaryKey(test.in) + 
require.NoError(t, err) + assert.Equal(t, test.expect, out) + }) + } +} + +func TestRisingwaveGetPrimaryKeyFakeEmptyValues(t *testing.T) { + tests := []struct { + name string + primaryKey map[string]string + expected string + }{ + { + name: "single key", + primaryKey: map[string]string{ + "id": "value-not-used", + }, + expected: `'' "id"`, + }, + { + name: "multiple keys", + primaryKey: map[string]string{ + "id": "value-not-used", + "block": "value-not-used", + "idx": "value-not-used", + }, + expected: `'' "block",'' "id",'' "idx"`, + }, + { + name: "keys with special characters", + primaryKey: map[string]string{ + "user_id": "value-not-used", + "order-num": "value-not-used", + }, + expected: `'' "order-num",'' "user_id"`, + }, + { + name: "empty map", + primaryKey: map[string]string{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getPrimaryKeyFakeEmptyValues(tt.primaryKey) + assert.Equal(t, tt.expected, result) + + // For multiple keys, verify the order is predictable (alphabetical) + if len(tt.primaryKey) > 1 { + parts := strings.Split(result, ",") + for i := 1; i < len(parts); i++ { + assert.True(t, strings.Compare(parts[i-1], parts[i]) <= 0, + "Expected sorted keys, but got %s before %s", parts[i-1], parts[i]) + } + } + }) + } +} + +func TestRisingwaveGetPrimaryKeyFakeEmptyValuesAssertion(t *testing.T) { + tests := []struct { + name string + primaryKey map[string]string + escapedTableName string + expected string + }{ + { + name: "single key", + primaryKey: map[string]string{ + "id": "value-not-used", + }, + escapedTableName: `"users"`, + expected: `"users"."id" IS NULL`, + }, + { + name: "multiple keys", + primaryKey: map[string]string{ + "id": "value-not-used", + "block": "value-not-used", + "idx": "value-not-used", + }, + escapedTableName: `"transactions"`, + expected: `"transactions"."block" IS NULL AND "transactions"."id" IS NULL AND "transactions"."idx" IS NULL`, + }, + { + name: "schema 
qualified table", + primaryKey: map[string]string{ + "user_id": "value-not-used", + }, + escapedTableName: `"public"."users"`, + expected: `"public"."users"."user_id" IS NULL`, + }, + { + name: "empty map", + primaryKey: map[string]string{}, + escapedTableName: `"table"`, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getPrimaryKeyFakeEmptyValuesAssertion(tt.primaryKey, tt.escapedTableName) + assert.Equal(t, tt.expected, result) + + // For multiple keys, verify the order is predictable (alphabetical) + if len(tt.primaryKey) > 1 { + parts := strings.Split(result, "AND ") + for i := 1; i < len(parts); i++ { + assert.True(t, strings.Compare(parts[i-1], parts[i]) <= 0, + "Expected sorted parts, but got %s before %s", parts[i-1], parts[i]) + } + } + }) + } +} diff --git a/db_changes/db/dsn.go b/db_changes/db/dsn.go index 2e51dde..4be9d86 100644 --- a/db_changes/db/dsn.go +++ b/db_changes/db/dsn.go @@ -28,6 +28,7 @@ var driverMap = map[string]string{ "psql": "postgres", "postgres": "postgres", "clickhouse": "clickhouse", + "risingwave": "risingwave", } func ParseDSN(dsn string) (*DSN, error) { @@ -103,6 +104,15 @@ func (c *DSN) Driver() string { return c.driver } +// SqlDriver returns the SQL driver name that should be used with sql.Open() +// For RisingWave, this returns "postgres" since RisingWave uses the PostgreSQL wire protocol +func (c *DSN) SqlDriver() string { + if c.driver == "risingwave" { + return "postgres" + } + return c.driver +} + func (c *DSN) ConnString() string { if c.driver == "clickhouse" { for _, option := range c.Options { diff --git a/db_changes/db/dsn_test.go b/db_changes/db/dsn_test.go index 7b8f90f..c4c5b68 100644 --- a/db_changes/db/dsn_test.go +++ b/db_changes/db/dsn_test.go @@ -44,6 +44,13 @@ func TestParseDSN(t *testing.T) { expectSchema: "default", expectPassword: "", }, + { + name: "risingwave DSN", + dns: "risingwave://root@risingwave:4566/dev?sslmode=disable", + expectConnString: 
"host=risingwave port=4566 dbname=dev sslmode=disable user=root", + expectSchema: "public", + expectPassword: "", + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -60,3 +67,40 @@ func TestParseDSN(t *testing.T) { } } + +func TestDSN_SqlDriver(t *testing.T) { + tests := []struct { + name string + dsn string + expectedDriver string + expectedSqlDriver string + }{ + { + name: "postgres DSN", + dsn: "postgres://user:pass@localhost/db", + expectedDriver: "postgres", + expectedSqlDriver: "postgres", + }, + { + name: "risingwave DSN", + dsn: "risingwave://root@risingwave:4566/dev", + expectedDriver: "risingwave", + expectedSqlDriver: "postgres", + }, + { + name: "clickhouse DSN", + dsn: "clickhouse://default@localhost:9000/default", + expectedDriver: "clickhouse", + expectedSqlDriver: "clickhouse", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + d, err := ParseDSN(test.dsn) + require.NoError(t, err) + assert.Equal(t, test.expectedDriver, d.Driver()) + assert.Equal(t, test.expectedSqlDriver, d.SqlDriver()) + }) + } +} diff --git a/db_changes/db/flush.go b/db_changes/db/flush.go index af01dc7..c1e96ad 100644 --- a/db_changes/db/flush.go +++ b/db_changes/db/flush.go @@ -14,10 +14,12 @@ func (l *Loader) Flush(ctx context.Context, outputModuleHash string, cursor *sin ctx = clickhouse.Context(context.Background(), clickhouse.WithStdAsync(false)) startAt := time.Now() + tx, err := l.BeginTx(ctx, nil) if err != nil { return 0, fmt.Errorf("failed to being db transaction: %w", err) } + defer func() { if err != nil { if err := tx.Rollback(); err != nil { @@ -39,6 +41,7 @@ func (l *Loader) Flush(ctx context.Context, outputModuleHash string, cursor *sin if err := tx.Commit(); err != nil { return 0, fmt.Errorf("failed to commit db transaction: %w", err) } + l.reset() // We add + 1 to the table count because the `cursors` table is an implicit table diff --git a/db_changes/sinker/sinker.go b/db_changes/sinker/sinker.go index 
f31867b..01199a7 100644 --- a/db_changes/sinker/sinker.go +++ b/db_changes/sinker/sinker.go @@ -228,7 +228,6 @@ func (s *SQLSinker) applyDatabaseChanges(dbChanges *pbdatabase.DatabaseChanges, } func (s *SQLSinker) HandleBlockRangeCompletion(ctx context.Context, cursor *sink.Cursor) error { - s.logger.Info("stream completed, flushing to database", zap.Stringer("block", cursor.Block())) _, err := s.loader.Flush(ctx, s.OutputModuleHash(), cursor, cursor.Block().Num()) if err != nil { diff --git a/db_proto/sinker.go b/db_proto/sinker.go index d1e26ff..1070519 100644 --- a/db_proto/sinker.go +++ b/db_proto/sinker.go @@ -144,7 +144,6 @@ func (s *Sinker) HandleBlockScopedData(ctx context.Context, data *pbsubstreamsrp err = s.processHolder(h, s.stats) if err != nil { if s.useTransaction { - s.logger.Error("rolling back transaction", zap.Error(err)) s.db.RollbackTransaction() } return fmt.Errorf("process holder: %w", err) diff --git a/db_proto/sql/click_house/accumulator_inserter.go b/db_proto/sql/click_house/accumulator_inserter.go index f113493..c41b11a 100644 --- a/db_proto/sql/click_house/accumulator_inserter.go +++ b/db_proto/sql/click_house/accumulator_inserter.go @@ -8,7 +8,9 @@ import ( "github.com/ClickHouse/ch-go" "github.com/ClickHouse/ch-go/proto" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" ) type accumulator struct { @@ -38,6 +40,10 @@ func NewAccumulatorInserter(database *Database, logger *zap.Logger) (*Accumulato } func createAccumulators(dialect *DialectClickHouse) (map[string]*accumulator, error) { + if dialect == nil { + panic("dialect is nil") + } + accumulators := map[string]*accumulator{} accumulators["_blocks_"] = &accumulator{ @@ -64,14 +70,14 @@ func createAccumulators(dialect *DialectClickHouse) (map[string]*accumulator, er input := map[string]proto.ColInput{} columns := map[int]string{} - input["block_number"] = &proto.ColUInt64{} - columns[0] = 
"block_number" - input["block_timestamp"] = &proto.ColDateTime{} - columns[1] = "block_timestamp" - input["version"] = &proto.ColInt64{} - columns[2] = "version" - input["deleted"] = &proto.ColBool{} - columns[3] = "deleted" + input[sql2.DialectFieldBlockNumber] = &proto.ColUInt64{} + columns[0] = sql2.DialectFieldBlockNumber + input[sql2.DialectFieldBlockTimestamp] = &proto.ColDateTime{} + columns[1] = sql2.DialectFieldBlockTimestamp + input[sql2.DialectFieldVersion] = &proto.ColInt64{} + columns[2] = sql2.DialectFieldVersion + input[sql2.DialectFieldDeleted] = &proto.ColBool{} + columns[3] = sql2.DialectFieldDeleted primaryName := "" if table.PrimaryKey != nil { @@ -140,7 +146,13 @@ func (i *AccumulatorInserter) insert(table string, values []any) error { switch input := input.(type) { case *proto.ColDateTime: - input.Append(value.(time.Time)) + if t, ok := value.(*timestamppb.Timestamp); ok { + input.Append(t.AsTime()) + } else if t, ok := value.(time.Time); ok { + input.Append(t) + } else { + panic(fmt.Sprintf("unknown time base input type %T for column %s of table %s", input, colName, table)) + } case *proto.ColInt32: input.Append(value.(int32)) case *proto.ColInt64: diff --git a/db_proto/sql/click_house/database.go b/db_proto/sql/click_house/database.go index 86fc209..bd92419 100644 --- a/db_proto/sql/click_house/database.go +++ b/db_proto/sql/click_house/database.go @@ -50,6 +50,10 @@ func NewDatabase( return nil, fmt.Errorf("creating base database: %w", err) } dialect, err := NewDialectClickHouse(schema, logger) + if err != nil { + return nil, fmt.Errorf("creating dialect: %w", err) + } + database := &Database{ ctx: ctx, dsn: dsn, @@ -84,7 +88,7 @@ func newClient(dsn *db.DSN) (*ch.Client, error) { for _, option := range dsn.Options { parts := strings.Split(option, "=") - if parts[0] == "secure" { + if parts[0] == "secure" && parts[1] == "true" { chOption.TLS = &tls.Config{} continue } @@ -161,7 +165,7 @@ func (d *Database) CreateDatabase(useConstraints bool) 
error { if err := client.Do(d.ctx, ch.Query{ Body: statement, }); err != nil { - return fmt.Errorf("executing create table sql: %w", err) + return fmt.Errorf("executing create table sql: %w %q", err, statement) } d.logger.Info("table created", zap.String("table_name", statement), zap.String("schema_name", d.schema.Name)) } @@ -370,9 +374,9 @@ func (d *Database) HandleBlocksUndo(lastValidBlockNum uint64) error { } query := fmt.Sprintf(` INSERT INTO %s - SELECT block_number, block_timestamp, %d, true %s - FROM %s WHERE block_number > %d - `, tableFullName, version, fields, tableFullName, lastValidBlockNum) + SELECT %s, %s, %d, true %s + FROM %s WHERE %s > %d + `, tableFullName, sql.DialectFieldBlockNumber, sql.DialectFieldBlockTimestamp, version, fields, tableFullName, sql.DialectFieldBlockNumber, lastValidBlockNum) err := client.Do(d.ctx, ch.Query{ Body: query, diff --git a/db_proto/sql/click_house/dialect.go b/db_proto/sql/click_house/dialect.go index c13ff50..eae15d7 100644 --- a/db_proto/sql/click_house/dialect.go +++ b/db_proto/sql/click_house/dialect.go @@ -9,6 +9,7 @@ import ( sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" "github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema" + pbSchmema "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1" "go.uber.org/zap" ) @@ -22,7 +23,7 @@ const staticSqlCreateBlock = ` timestamp timestamp, version Int64, deleted bool - + ) ENGINE = ReplacingMergeTree(version) PARTITION BY (toYYYYMM(timestamp)) @@ -75,10 +76,10 @@ func (d *DialectClickHouse) createTable(table *schema.Table) error { sb.WriteString(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (", tableName)) - sb.WriteString(" block_number Int64 NOT NULL,") - sb.WriteString(" block_timestamp timestamp NOT NULL,") - sb.WriteString(" version Int64 NOT NULL,") - sb.WriteString(" deleted bool NOT NULL,") + sb.WriteString(fmt.Sprintf(" %s Int64 NOT NULL,", sql2.DialectFieldBlockNumber)) + sb.WriteString(fmt.Sprintf(" %s 
timestamp NOT NULL,", sql2.DialectFieldBlockTimestamp)) + sb.WriteString(fmt.Sprintf(" %s Int64 NOT NULL,", sql2.DialectFieldVersion)) + sb.WriteString(fmt.Sprintf(" %s bool NOT NULL,", sql2.DialectFieldDeleted)) var primaryKeyFieldName string if table.PrimaryKey != nil { @@ -119,7 +120,7 @@ func (d *DialectClickHouse) createTable(table *schema.Table) error { case f.IsMessage: case f.ForeignKey != nil: } - //fmt.Printf("Table %s, field %s\n", table.Name, fieldName) + fieldType := MapFieldType(f.FieldDescriptor) sb.WriteString(fmt.Sprintf("%s %s", fieldName, fieldType)) sb.WriteString(",") @@ -131,37 +132,33 @@ func (d *DialectClickHouse) createTable(table *schema.Table) error { sb = strings.Builder{} sb.WriteString(temp) - orderByFields := make([]string, 0) - if primaryKeyFieldName != "" { - orderByFields = append(orderByFields, primaryKeyFieldName) + replacingMergeTree, err := replacingMergeTreeString(table) + if err != nil { + return fmt.Errorf("getting 'replacing merge tree' string: %w", err) } - //this is tricky. 
handling one to one relation - if primaryKeyFieldName == "" && table.ChildOf != nil { - parentTable, parentFound := d.TableRegistry[table.ChildOf.ParentTable] - if !parentFound { - return fmt.Errorf("parent table %q not found", table.ChildOf.ParentTable) - } + primaryKey := "" + if primaryKeyFieldName != "" { + primaryKey = fmt.Sprintf("PRIMARY KEY (%s)", primaryKeyFieldName) + } - for _, parentField := range parentTable.Columns { - if parentField.Name == table.ChildOf.ParentTableField && !parentField.IsRepeated { - orderByFields = append(orderByFields, parentField.Name) - break - } - } + orderBy, err := orderByString(table) + if err != nil { + return fmt.Errorf("getting 'order by' string: %w", err) } - if len(orderByFields) == 0 { - return fmt.Errorf("missing order by fields") + partitionBy, err := partitionByString(table) + if err != nil { + return fmt.Errorf("getting 'partition by' string: %w", err) } - primaryKey := "" - if primaryKeyFieldName != "" { - primaryKey = fmt.Sprintf("PRIMARY KEY (%s)", primaryKeyFieldName) + // Add indexes if they exist + indexes, err := indexString(table) + if err != nil { + return fmt.Errorf("getting 'index' string: %w", err) } - orderBy := strings.Join(orderByFields, ",") - sb.WriteString(fmt.Sprintf(") ENGINE = ReplacingMergeTree(version) PARTITION BY (toYYYYMM(block_timestamp)) %s ORDER BY (%s);", primaryKey, orderBy)) + sb.WriteString(fmt.Sprintf(" %s) ENGINE = %s %s %s %s;", indexes, replacingMergeTree, primaryKey, partitionBy, orderBy)) d.AddCreateTableSql(table.Name, sb.String()) @@ -228,3 +225,103 @@ func (d *DialectClickHouse) SchemaHash() string { func tableName(schemaName string, tableName string) string { return fmt.Sprintf("%s.%s", schemaName, tableName) } + +func orderByString(table *schema.Table) (string, error) { + info := table.PbTableInfo.ClickhouseTableOptions + if info == nil { + return "", fmt.Errorf("clickhouse table options not set for table %q", table.Name) + } + + if len(info.OrderByFields) == 0 { + return 
"", fmt.Errorf("clickhouse table options for table %q don't have any order by fields. Require at least 1", table.Name) + } + + out := "" + for i, field := range info.OrderByFields { + w := wrapWithClickhouseFunction(field.Name, field.Function) + if field.Descending { + w += " desc" + } + out += w + if i < len(info.OrderByFields)-1 { + out += ", " + } + } + + return fmt.Sprintf("ORDER BY (%s)", out), nil +} + +func partitionByString(table *schema.Table) (string, error) { + info := table.PbTableInfo.ClickhouseTableOptions + if info == nil { + return "", fmt.Errorf("clickhouse table options not set for table %q", table.Name) + } + + out := sql2.DialectFieldBlockTimestamp + for _, field := range info.PartitionFields { + w := wrapWithClickhouseFunction(field.Name, field.Function) + out += ", " + w + } + + return fmt.Sprintf("PARTITION BY (%s)", out), nil +} + +func replacingMergeTreeString(table *schema.Table) (string, error) { + info := table.PbTableInfo.ClickhouseTableOptions + if info == nil { + return "", fmt.Errorf("clickhouse table options not set for table %q", table.Name) + } + + out := sql2.DialectFieldVersion + for _, field := range info.ReplacingFields { + out += ", " + field.Name + } + + return fmt.Sprintf("ReplacingMergeTree(%s)", out), nil +} + +func wrapWithClickhouseFunction(fieldName string, function pbSchmema.Function) string { + format := "%s" + switch function { + case pbSchmema.Function_unset: + case pbSchmema.Function_toMonth: + format = "toMonth(%s)" + case pbSchmema.Function_toDate: + format = "toDate(%s)" + case pbSchmema.Function_toStartOfMonth: + case pbSchmema.Function_toYear: + format = "toYear(%s)" + case pbSchmema.Function_toYYYYDD: + format = "toYYYYMMDD(%s)" + case pbSchmema.Function_toYYYYMM: + format = "toYYYYMM(%s)" + } + return fmt.Sprintf(format, fieldName) +} + +func indexString(table *schema.Table) (string, error) { + indexes := "" + if table.PbTableInfo != nil && table.PbTableInfo.ClickhouseTableOptions != nil { + if 
len(table.PbTableInfo.ClickhouseTableOptions.IndexFields) > 0 { + var indexStrings []string + for _, indexField := range table.PbTableInfo.ClickhouseTableOptions.IndexFields { + fieldName := indexField.FieldName + if indexField.Function != pbSchmema.Function_unset { + fieldName = fmt.Sprintf("%s(%s)", indexField.Function.String(), fieldName) + } + + indexStr := fmt.Sprintf("INDEX %s %s TYPE %s GRANULARITY %d", + indexField.Name, + fieldName, + indexField.Type.String(), + indexField.Granularity) + indexStrings = append(indexStrings, indexStr) + } + + if len(indexStrings) > 0 { + indexes = ", " + strings.Join(indexStrings, ", ") + } + } + } + return indexes, nil +} diff --git a/db_proto/sql/click_house/types.go b/db_proto/sql/click_house/types.go index b52815d..17fecfb 100644 --- a/db_proto/sql/click_house/types.go +++ b/db_proto/sql/click_house/types.go @@ -2,6 +2,7 @@ package clickhouse import ( "encoding/base64" + "encoding/hex" "fmt" "strconv" "strings" @@ -10,6 +11,8 @@ import ( "github.com/ClickHouse/ch-go/proto" "github.com/golang/protobuf/protoc-gen-go/descriptor" "github.com/jhump/protoreflect/desc" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + protoutil "github.com/streamingfast/substreams-sink-sql/proto" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -33,17 +36,73 @@ const ( TypeFloat32 DataType = "Float32" TypeFloat64 DataType = "Float64" - TypeBool DataType = "Bool" - TypeVarchar DataType = "VARCHAR" - - TypeDateTime DataType = "DateTime" + TypeBool DataType = "Bool" + TypeString DataType = "String" + TypeVarchar DataType = "VARCHAR" + TypeDateTime DataType = "DateTime" + TypeDateTime64 DataType = "DateTime64" + + // ClickHouse semantic type mappings + TypeFixedString DataType = "FixedString" + TypeDecimal32 DataType = "Decimal32" + TypeDecimal64 DataType = "Decimal64" + TypeDecimal128 DataType = "Decimal128" + TypeDecimal256 DataType = "Decimal256" ) func (s DataType) String() string { return string(s) } +// 
MapSemanticType maps semantic types to ClickHouse-specific SQL types +func MapSemanticType(semanticType sql2.SemanticType) (string, bool) { + switch semanticType { + case sql2.SemanticUint256: + return string(TypeString), true // ClickHouse stores as String (no native UInt256) + case sql2.SemanticInt256: + return string(TypeString), true // ClickHouse stores as String (no native Int256) + case sql2.SemanticAddress: + return "FixedString(42)", true // Fixed-length for blockchain addresses + case sql2.SemanticHash: + return "FixedString(66)", true // Fixed-length for blockchain hashes + case sql2.SemanticSignature: + return string(TypeString), true + case sql2.SemanticPubkey: + return string(TypeString), true + case sql2.SemanticHex: + return string(TypeString), true + case sql2.SemanticBase64: + return string(TypeString), true + case sql2.SemanticJSON: + return string(TypeString), true // ClickHouse doesn't have native JSON, use String + case sql2.SemanticUUID: + return string(TypeString), true // Store UUID as string in ClickHouse + case sql2.SemanticUnixTimestamp, sql2.SemanticBlockTimestamp: + return string(TypeDateTime), true + case sql2.SemanticUnixTimestampMS: + return "DateTime64(3)", true // ClickHouse DateTime64 with millisecond precision + default: + return "", false // Not supported + } +} + +// SupportsSemanticType returns true if ClickHouse supports the semantic type +func SupportsSemanticType(semanticType sql2.SemanticType) bool { + _, supported := MapSemanticType(semanticType) + return supported +} + func MapFieldType(fd *desc.FieldDescriptor) DataType { + // Check for semantic type annotation first + semanticType, _, hasSemanticType := protoutil.SemanticTypeInfo(fd) + if hasSemanticType { + if sqlType, supported := MapSemanticType(sql2.SemanticType(semanticType)); supported { + return DataType(sqlType) + } + // Fall through to default mapping if semantic type not supported + } + + // Default protobuf type mapping t := fd.GetType() switch t { case 
descriptor.FieldDescriptorProto_TYPE_MESSAGE: @@ -100,7 +159,7 @@ func ColInputForColumn(fd *desc.FieldDescriptor) proto.ColInput { case descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_FIXED32: return &proto.ColUInt32{} case descriptor.FieldDescriptorProto_TYPE_FLOAT: - return &proto.ColFloat64{} + return &proto.ColFloat32{} case descriptor.FieldDescriptorProto_TYPE_DOUBLE: return &proto.ColFloat64{} case descriptor.FieldDescriptorProto_TYPE_STRING: @@ -145,3 +204,197 @@ func ValueToString(value any) (s string) { } return } + +// ConvertSemanticValue converts a value according to semantic type and format hint for ClickHouse +func ConvertSemanticValue(semanticType sql2.SemanticType, value interface{}, formatHint string) (string, error) { + switch semanticType { + case sql2.SemanticUint256: + return convertToString(value) + case sql2.SemanticInt256: + return convertToString(value) + case sql2.SemanticAddress: + return convertToFixedStringAddress(value) + case sql2.SemanticHash: + return convertToFixedStringHash(value) + case sql2.SemanticSignature, sql2.SemanticPubkey, sql2.SemanticHex: + return convertToString(value) + case sql2.SemanticJSON: + return convertToString(value) // ClickHouse stores JSON as String + case sql2.SemanticUUID: + return convertToString(value) // ClickHouse stores UUID as String + case sql2.SemanticUnixTimestamp, sql2.SemanticBlockTimestamp: + return convertUnixTimestamp(value, false) + case sql2.SemanticUnixTimestampMS: + return convertUnixTimestamp(value, true) + default: + // Fallback to default value conversion + return ValueToString(value), nil + } +} + +// convertToUInt256 converts values to ClickHouse UInt256 type +func convertToUInt256(value interface{}, formatHint string) (string, error) { + switch v := value.(type) { + case string: + // Handle hex strings (0x...) 
+ if strings.HasPrefix(v, "0x") { + return v, nil // ClickHouse UInt256 can handle hex directly + } + // Handle decimal strings + if formatHint == "hex" && !strings.HasPrefix(v, "0x") { + // Add 0x prefix for ClickHouse UInt256 + return "0x" + v, nil + } + return v, nil + case []byte: + // Convert bytes to hex for ClickHouse UInt256 + return "0x" + hex.EncodeToString(v), nil + case int64, uint64, int32, uint32: + // Convert numeric types to string + return fmt.Sprintf("%v", v), nil + default: + return "", fmt.Errorf("cannot convert %T to UInt256", value) + } +} + +// convertToInt256 converts values to ClickHouse Int256 type +func convertToInt256(value interface{}, formatHint string) (string, error) { + switch v := value.(type) { + case string: + // Handle hex strings (0x...) + if strings.HasPrefix(v, "0x") { + return v, nil // ClickHouse Int256 can handle hex directly + } + // Handle decimal strings + if formatHint == "hex" && !strings.HasPrefix(v, "0x") { + // Add 0x prefix for ClickHouse Int256 + return "0x" + v, nil + } + return v, nil + case []byte: + // Convert bytes to hex for ClickHouse Int256 + return "0x" + hex.EncodeToString(v), nil + case int64, uint64, int32, uint32: + // Convert numeric types to string + return fmt.Sprintf("%v", v), nil + default: + return "", fmt.Errorf("cannot convert %T to Int256", value) + } +} + +// convertToFixedStringAddress converts values to ClickHouse FixedString(42) format +func convertToFixedStringAddress(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Validate address format + if strings.HasPrefix(v, "0x") { + // Has 0x prefix - validate hex part is exactly 40 chars + hexPart := v[2:] + if len(hexPart) == 40 { + return "'" + v + "'", nil + } + return "", fmt.Errorf("invalid address format: %s (expected 40 hex chars after 0x)", v) + } + // No 0x prefix - should be exactly 40 hex chars + if len(v) == 40 { + return "'0x" + v + "'", nil + } + return "", fmt.Errorf("invalid address format: 
%s (expected 40 or 42 chars)", v) + case []byte: + if len(v) == 20 { + return "'0x" + hex.EncodeToString(v) + "'", nil + } + return "", fmt.Errorf("invalid address byte length: %d (expected 20)", len(v)) + default: + return "", fmt.Errorf("cannot convert %T to address", value) + } +} + +// convertToFixedStringHash converts values to ClickHouse FixedString(66) format +func convertToFixedStringHash(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Validate hash format + if len(v) == 66 && strings.HasPrefix(v, "0x") { + return "'" + v + "'", nil + } + if len(v) == 64 { + // Add 0x prefix if missing + return "'0x" + v + "'", nil + } + return "", fmt.Errorf("invalid hash format: %s (expected 64 or 66 chars)", v) + case []byte: + if len(v) == 32 { + return "'0x" + hex.EncodeToString(v) + "'", nil + } + return "", fmt.Errorf("invalid hash byte length: %d (expected 32)", len(v)) + default: + return "", fmt.Errorf("cannot convert %T to hash", value) + } +} + +// convertToString converts values to ClickHouse String format +func convertToString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return "'" + strings.ReplaceAll(strings.ReplaceAll(v, "'", "''"), "\\", "\\\\") + "'", nil + case []byte: + // For binary data, encode as hex + return "'0x" + hex.EncodeToString(v) + "'", nil + default: + return fmt.Sprintf("'%v'", value), nil + } +} + +// convertToDecimal converts values to ClickHouse decimal format +func convertToDecimal(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return v, nil // ClickHouse decimals accept string literals directly + case float64, float32: + return fmt.Sprintf("%v", v), nil + case int64, uint64, int32, uint32, int, uint: + return fmt.Sprintf("%v", v), nil + default: + return "", fmt.Errorf("cannot convert %T to decimal", value) + } +} + +// convertUnixTimestamp converts unix timestamps to ClickHouse timestamp format +func 
convertUnixTimestamp(value interface{}, isMilliseconds bool) (string, error) { + var t time.Time + + switch v := value.(type) { + case int64: + if isMilliseconds { + t = time.Unix(v/1000, (v%1000)*1000000) + } else { + t = time.Unix(v, 0) + } + case uint64: + if isMilliseconds { + t = time.Unix(int64(v/1000), int64((v%1000)*1000000)) + } else { + t = time.Unix(int64(v), 0) + } + case string: + // Try to parse as number + if val, err := strconv.ParseInt(v, 10, 64); err == nil { + if isMilliseconds { + t = time.Unix(val/1000, (val%1000)*1000000) + } else { + t = time.Unix(val, 0) + } + } else { + return "", fmt.Errorf("cannot parse timestamp string: %s", v) + } + default: + return "", fmt.Errorf("cannot convert %T to timestamp", value) + } + + if isMilliseconds { + // ClickHouse DateTime64 format with milliseconds + return "'" + t.UTC().Format("2006-01-02 15:04:05.000") + "'", nil + } + return "'" + t.UTC().Format("2006-01-02 15:04:05") + "'", nil +} diff --git a/db_proto/sql/click_house/types_test.go b/db_proto/sql/click_house/types_test.go new file mode 100644 index 0000000..70a379e --- /dev/null +++ b/db_proto/sql/click_house/types_test.go @@ -0,0 +1,441 @@ +package clickhouse + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestValueToString(t *testing.T) { + tests := []struct { + name string + input interface{} + expected string + }{ + // String values + {"simple string", "hello", "'hello'"}, + {"string with quotes", "hello'world", "'hello''world'"}, + {"string with backslash", "hello\\world", "'hello\\\\world'"}, + {"empty string", "", "''"}, + + // Integer values + {"int64", int64(123), "123"}, + {"int64 negative", int64(-456), "-456"}, + {"int32", int32(456), "456"}, + {"int", int(789), "789"}, + + // Unsigned integer values + {"uint64", uint64(123), "123"}, + {"uint32", uint32(456), "456"}, + {"uint", 
uint(789), "789"}, + + // Float values + {"float64", float64(123.45), "123.45"}, + {"float32", float32(67.89), "67.89"}, + + // Boolean values + {"bool true", true, "true"}, + {"bool false", false, "false"}, + + // Byte slice (should be base64 encoded) + {"bytes", []uint8{0xDE, 0xAD, 0xBE, 0xEF}, "'3q2+7w=='"}, + {"empty bytes", []uint8{}, "''"}, + + // Time values + {"time", time.Date(2023, 1, 15, 10, 30, 0, 0, time.UTC), "'2023-01-15 10:30:00'"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ValueToString(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestValueToStringTimestamp(t *testing.T) { + // Test protobuf timestamp + testTime := time.Date(2023, 1, 15, 10, 30, 0, 0, time.UTC) + pbTime := timestamppb.New(testTime) + result := ValueToString(pbTime) + assert.Equal(t, "'2023-01-15 10:30:00'", result) +} + +func TestValueToStringPanic(t *testing.T) { + // Test unsupported type should panic + assert.Panics(t, func() { + ValueToString(complex64(1 + 2i)) + }) +} + +func TestDataTypeString(t *testing.T) { + tests := []struct { + dataType DataType + expected string + }{ + {TypeInteger8, "Int8"}, + {TypeInteger16, "Int16"}, + {TypeInteger32, "Int32"}, + {TypeInteger64, "Int64"}, + {TypeInteger128, "Int128"}, + {TypeInteger256, "Int256"}, + {TypeUInt8, "UInt8"}, + {TypeUInt16, "UInt16"}, + {TypeUInt32, "UInt32"}, + {TypeUInt64, "UInt64"}, + {TypeUInt128, "UInt128"}, + {TypeUInt256, "UInt256"}, + {TypeFloat32, "Float32"}, + {TypeFloat64, "Float64"}, + {TypeBool, "Bool"}, + {TypeString, "String"}, + {TypeVarchar, "VARCHAR"}, + {TypeDateTime, "DateTime"}, + {TypeDateTime64, "DateTime64"}, + {TypeFixedString, "FixedString"}, + {TypeDecimal32, "Decimal32"}, + {TypeDecimal64, "Decimal64"}, + {TypeDecimal128, "Decimal128"}, + {TypeDecimal256, "Decimal256"}, + } + + for _, tt := range tests { + t.Run(string(tt.dataType), func(t *testing.T) { + assert.Equal(t, tt.expected, tt.dataType.String()) + }) + } +} + +func 
TestMapSemanticType(t *testing.T) { + tests := []struct { + name string + semanticType sql2.SemanticType + expectedSQL string + shouldSupport bool + }{ + { + name: "uint256 maps to String", + semanticType: sql2.SemanticUint256, + expectedSQL: "String", + shouldSupport: true, + }, + { + name: "int256 maps to String", + semanticType: sql2.SemanticInt256, + expectedSQL: "String", + shouldSupport: true, + }, + { + name: "address maps to FixedString(42)", + semanticType: sql2.SemanticAddress, + expectedSQL: "FixedString(42)", + shouldSupport: true, + }, + { + name: "hash maps to FixedString(66)", + semanticType: sql2.SemanticHash, + expectedSQL: "FixedString(66)", + shouldSupport: true, + }, + { + name: "json maps to String", + semanticType: sql2.SemanticJSON, + expectedSQL: "String", + shouldSupport: true, + }, + { + name: "uuid maps to String", + semanticType: sql2.SemanticUUID, + expectedSQL: "String", + shouldSupport: true, + }, + { + name: "unix_timestamp maps to DateTime", + semanticType: sql2.SemanticUnixTimestamp, + expectedSQL: "DateTime", + shouldSupport: true, + }, + { + name: "unix_timestamp_ms maps to DateTime64(3)", + semanticType: sql2.SemanticUnixTimestampMS, + expectedSQL: "DateTime64(3)", + shouldSupport: true, + }, + { + name: "unsupported type", + semanticType: sql2.SemanticType("unsupported"), + expectedSQL: "", + shouldSupport: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sqlType, supported := MapSemanticType(tt.semanticType) + + assert.Equal(t, tt.shouldSupport, supported, "MapSemanticType() supported") + assert.Equal(t, tt.expectedSQL, sqlType, "MapSemanticType() sqlType") + + // Test SupportsSemanticType consistency + assert.Equal(t, tt.shouldSupport, SupportsSemanticType(tt.semanticType), "SupportsSemanticType() consistency") + }) + } +} + +func TestConvertToUInt256(t *testing.T) { + tests := []struct { + name string + value interface{} + formatHint string + expected string + shouldError bool + }{ + { + 
name: "hex string with 0x prefix", + value: "0x1234567890abcdef", + formatHint: "hex", + expected: "0x1234567890abcdef", + shouldError: false, + }, + { + name: "hex string without 0x prefix with hex hint", + value: "1234567890abcdef", + formatHint: "hex", + expected: "0x1234567890abcdef", + shouldError: false, + }, + { + name: "decimal string", + value: "123456789012345678901234567890", + formatHint: "decimal", + expected: "123456789012345678901234567890", + shouldError: false, + }, + { + name: "byte array", + value: []byte{0x12, 0x34, 0x56, 0x78}, + formatHint: "", + expected: "0x12345678", + shouldError: false, + }, + { + name: "int64 value", + value: int64(12345), + formatHint: "", + expected: "12345", + shouldError: false, + }, + { + name: "uint64 value", + value: uint64(12345), + formatHint: "", + expected: "12345", + shouldError: false, + }, + { + name: "unsupported type", + value: float64(123.45), + formatHint: "", + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToUInt256(tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "convertToUInt256() should error") + return + } + + assert.NoError(t, err, "convertToUInt256() should not error") + assert.Equal(t, tt.expected, result, "convertToUInt256() result") + }) + } +} + +func TestConvertToInt256(t *testing.T) { + tests := []struct { + name string + value interface{} + formatHint string + expected string + shouldError bool + }{ + { + name: "hex string with 0x prefix", + value: "0x1234567890abcdef", + formatHint: "hex", + expected: "0x1234567890abcdef", + shouldError: false, + }, + { + name: "decimal string", + value: "123456789012345678901234567890", + formatHint: "decimal", + expected: "123456789012345678901234567890", + shouldError: false, + }, + { + name: "int64 value", + value: int64(-12345), + formatHint: "", + expected: "-12345", + shouldError: false, + }, + { + name: "unsupported type", + value: 
float64(123.45), + formatHint: "", + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToInt256(tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "convertToInt256() should error") + return + } + + assert.NoError(t, err, "convertToInt256() should not error") + assert.Equal(t, tt.expected, result, "convertToInt256() result") + }) + } +} + +func TestConvertToFixedStringAddress(t *testing.T) { + tests := []struct { + name string + value interface{} + expected string + shouldError bool + }{ + { + name: "valid address with 0x prefix", + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + expected: "'0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e'", + shouldError: false, + }, + { + name: "valid address without 0x prefix", + value: "742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + expected: "'0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e'", + shouldError: false, + }, + { + name: "20-byte array", + value: []byte{0x74, 0x2d, 0x35, 0xcc, 0x66, 0x36, 0xc0, 0x53, 0x29, 0x25, 0xa3, 0xb8, 0xd0, 0xa3, 0xe5, 0xa5, 0xf2, 0xd5, 0xde, 0x8e}, + expected: "'0x742d35cc6636c0532925a3b8d0a3e5a5f2d5de8e'", + shouldError: false, + }, + { + name: "invalid address length", + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De", + expected: "", + shouldError: true, + }, + { + name: "unsupported type", + value: 123, + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToFixedStringAddress(tt.value) + + if tt.shouldError { + assert.Error(t, err, "convertToFixedStringAddress() should error") + return + } + + assert.NoError(t, err, "convertToFixedStringAddress() should not error") + assert.Equal(t, tt.expected, result, "convertToFixedStringAddress() result") + }) + } +} + +func TestConvertSemanticValue(t *testing.T) { + tests := []struct { + name string + semanticType sql2.SemanticType + value interface{} + formatHint 
string + shouldError bool + }{ + { + name: "uint256 conversion", + semanticType: sql2.SemanticUint256, + value: "0x123456789", + formatHint: "hex", + shouldError: false, + }, + { + name: "int256 conversion", + semanticType: sql2.SemanticInt256, + value: "123456789", + formatHint: "decimal", + shouldError: false, + }, + { + name: "address conversion", + semanticType: sql2.SemanticAddress, + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + formatHint: "", + shouldError: false, + }, + { + name: "string conversion", + semanticType: sql2.SemanticJSON, + value: `{"key": "value"}`, + formatHint: "", + shouldError: false, + }, + { + name: "unix timestamp conversion", + semanticType: sql2.SemanticUnixTimestamp, + value: int64(1640995200), + formatHint: "", + shouldError: false, + }, + { + name: "unix timestamp ms conversion", + semanticType: sql2.SemanticUnixTimestampMS, + value: int64(1640995200000), + formatHint: "", + shouldError: false, + }, + { + name: "fallback to default conversion", + semanticType: sql2.SemanticType("unknown"), + value: "test", + formatHint: "", + shouldError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := ConvertSemanticValue(tt.semanticType, tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "ConvertSemanticValue() should error") + return + } + + assert.NoError(t, err, "ConvertSemanticValue() should not error") + assert.NotEmpty(t, result, "ConvertSemanticValue() should return non-empty result") + }) + } +} \ No newline at end of file diff --git a/db_proto/sql/dialect.go b/db_proto/sql/dialect.go index 5f2b6a2..fd3200e 100644 --- a/db_proto/sql/dialect.go +++ b/db_proto/sql/dialect.go @@ -6,6 +6,14 @@ import ( "golang.org/x/exp/maps" ) +const DialectTableBlock = "_blocks_" +const DialectTableCursor = "_cursors_" + +const DialectFieldBlockNumber = "_block_number_" +const DialectFieldBlockTimestamp = "_block_timestamp_" +const DialectFieldVersion = "_version_" +const 
DialectFieldDeleted = "_deleted_" + type Dialect interface { SchemaHash() string FullTableName(table *schema.Table) string diff --git a/db_proto/sql/postgres/dialect.go b/db_proto/sql/postgres/dialect.go index 5ebe921..b0359d7 100644 --- a/db_proto/sql/postgres/dialect.go +++ b/db_proto/sql/postgres/dialect.go @@ -68,7 +68,7 @@ func (d *DialectPostgres) UseDeletedField() bool { } func (d *DialectPostgres) init() error { - d.AddPrimaryKeySql("_blocks_", fmt.Sprintf("alter table %s._blocks_ add constraint block_pk primary key (number);", d.schemaName)) + d.AddPrimaryKeySql(sql2.DialectTableBlock, fmt.Sprintf("alter table %s.%s add constraint block_pk primary key (number);", d.schemaName, sql2.DialectTableBlock)) return nil } @@ -79,8 +79,8 @@ func (d *DialectPostgres) createTable(table *schema.Table) error { sb.WriteString(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (", tableName)) - sb.WriteString(" block_number INTEGER NOT NULL,") - sb.WriteString(" block_timestamp TIMESTAMP NOT NULL,") + sb.WriteString(fmt.Sprintf(" %s INTEGER NOT NULL,", sql2.DialectFieldBlockNumber)) + sb.WriteString(fmt.Sprintf(" %s TIMESTAMP NOT NULL,", sql2.DialectFieldBlockTimestamp)) var primaryKeyFieldName string if table.PrimaryKey != nil { @@ -191,7 +191,7 @@ func (d *DialectPostgres) createTable(table *schema.Table) error { sb.WriteString(");\n") - d.AddForeignKeySql(tableName, fmt.Sprintf("ALTER TABLE %s ADD CONSTRAINT fk_block FOREIGN KEY (block_number) REFERENCES %s._blocks_(number) ON DELETE CASCADE", tableName, d.schemaName)) + d.AddForeignKeySql(tableName, fmt.Sprintf("ALTER TABLE %s ADD CONSTRAINT fk_block FOREIGN KEY (%s) REFERENCES %s.%s(number) ON DELETE CASCADE", tableName, sql2.DialectFieldBlockNumber, d.schemaName, sql2.DialectTableBlock)) d.AddCreateTableSql(table.Name, sb.String()) return nil diff --git a/db_proto/sql/postgres/types.go b/db_proto/sql/postgres/types.go index ee34222..95d97e8 100644 --- a/db_proto/sql/postgres/types.go +++ b/db_proto/sql/postgres/types.go @@ 
-2,6 +2,7 @@ package postgres import ( "encoding/base64" + "encoding/hex" "fmt" "strconv" "strings" @@ -9,22 +10,28 @@ import ( "github.com/golang/protobuf/protoc-gen-go/descriptor" "github.com/jhump/protoreflect/desc" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "github.com/streamingfast/substreams-sink-sql/proto" "google.golang.org/protobuf/types/known/timestamppb" ) type DataType string const ( - TypeNumeric DataType = "NUMERIC" - TypeInteger DataType = "INTEGER" - TypeBool DataType = "BOOLEAN" - TypeBigInt DataType = "BIGINT" - TypeDecimal DataType = "DECIMAL" - TypeDouble DataType = "DOUBLE PRECISION" - TypeText DataType = "TEXT" - TypeBlob DataType = "BLOB" - TypeVarchar DataType = "VARCHAR(255)" - TypeTimestamp DataType = "TIMESTAMP" + TypeNumeric DataType = "NUMERIC" + TypeInteger DataType = "INTEGER" + TypeBool DataType = "BOOLEAN" + TypeBigInt DataType = "BIGINT" + TypeDecimal DataType = "DECIMAL" + TypeDouble DataType = "DOUBLE PRECISION" + TypeText DataType = "TEXT" + TypeBlob DataType = "BLOB" + TypeVarchar DataType = "VARCHAR(255)" + TypeTimestamp DataType = "TIMESTAMP" + TypeTimestamptz DataType = "TIMESTAMP WITH TIME ZONE" + TypeJsonb DataType = "JSONB" + TypeUUID DataType = "UUID" + TypeChar DataType = "CHAR" ) func (s DataType) String() string { @@ -40,7 +47,51 @@ func IsWellKnownType(fd *desc.FieldDescriptor) bool { } } +// MapSemanticType maps semantic types to PostgreSQL-specific SQL types +func MapSemanticType(semanticType sql2.SemanticType) (string, bool) { + switch semanticType { + case sql2.SemanticUint256, sql2.SemanticInt256: + return "NUMERIC(78,0)", true // PostgreSQL NUMERIC for 256-bit integers + case sql2.SemanticAddress: + return "CHAR(42)", true // Fixed-length for blockchain addresses + case sql2.SemanticHash: + return "CHAR(66)", true // Fixed-length for blockchain hashes + case sql2.SemanticSignature: + return "VARCHAR", true + case sql2.SemanticPubkey: + return "VARCHAR", true + case sql2.SemanticHex: + 
return "VARCHAR", true + case sql2.SemanticBase64: + return "TEXT", true + case sql2.SemanticJSON: + return string(TypeJsonb), true // PostgreSQL native JSONB + case sql2.SemanticUUID: + return string(TypeUUID), true // PostgreSQL native UUID + case sql2.SemanticUnixTimestamp, sql2.SemanticUnixTimestampMS, sql2.SemanticBlockTimestamp: + return string(TypeTimestamptz), true + default: + return "", false // Not supported + } +} + +// SupportsSemanticType returns true if PostgreSQL supports the semantic type +func SupportsSemanticType(semanticType sql2.SemanticType) bool { + _, supported := MapSemanticType(semanticType) + return supported +} + func MapFieldType(fd *desc.FieldDescriptor) DataType { + // Check for semantic type annotation first + semanticType, _, hasSemanticType := proto.SemanticTypeInfo(fd) + if hasSemanticType { + if sqlType, supported := MapSemanticType(sql2.SemanticType(semanticType)); supported { + return DataType(sqlType) + } + // Fall through to default mapping if semantic type not supported + } + + // Default protobuf type mapping t := fd.GetType() switch t { case descriptor.FieldDescriptorProto_TYPE_MESSAGE: @@ -108,3 +159,208 @@ func ValueToString(value any) (s string) { } return } + +// ConvertSemanticValue converts a value according to semantic type and format hint for PostgreSQL +func ConvertSemanticValue(semanticType sql2.SemanticType, value interface{}, formatHint string) (string, error) { + switch semanticType { + case sql2.SemanticUint256, sql2.SemanticInt256: + return convertToNumeric(value, formatHint) + case sql2.SemanticAddress: + return convertToAddress(value) + case sql2.SemanticHash: + return convertToHash(value) + case sql2.SemanticSignature, sql2.SemanticPubkey, sql2.SemanticHex: + return convertToHexString(value) + case sql2.SemanticJSON: + return convertToJSONB(value) + case sql2.SemanticUUID: + return convertToUUID(value) + case sql2.SemanticUnixTimestamp: + return convertUnixTimestamp(value, false) + case 
sql2.SemanticUnixTimestampMS: + return convertUnixTimestamp(value, true) + case sql2.SemanticBlockTimestamp: + return convertUnixTimestamp(value, false) + default: + // Fallback to default value conversion + return ValueToString(value), nil + } +} + +// convertToNumeric converts values to PostgreSQL NUMERIC type +func convertToNumeric(value interface{}, formatHint string) (string, error) { + switch v := value.(type) { + case string: + // Handle hex strings (0x...) + if strings.HasPrefix(v, "0x") { + // Remove 0x prefix for PostgreSQL NUMERIC + hexStr := v[2:] + // Convert hex to decimal for PostgreSQL + if val, err := strconv.ParseUint(hexStr, 16, 64); err == nil { + return strconv.FormatUint(val, 10), nil + } + // For very large hex numbers, keep as string and validate + return "'" + v + "'", nil + } + // Handle decimal strings + if formatHint == "hex" && !strings.HasPrefix(v, "0x") { + // Convert hex string to decimal + if val, err := strconv.ParseUint(v, 16, 64); err == nil { + return strconv.FormatUint(val, 10), nil + } + } + return "'" + v + "'", nil + case []byte: + // Convert bytes to decimal string + hexStr := hex.EncodeToString(v) + if val, err := strconv.ParseUint(hexStr, 16, 64); err == nil { + return strconv.FormatUint(val, 10), nil + } + return "'0x" + hexStr + "'", nil + case int64, uint64, int32, uint32: + return fmt.Sprintf("%v", v), nil + default: + return "", fmt.Errorf("cannot convert %T to numeric", value) + } +} + +// convertToAddress converts values to PostgreSQL CHAR(42) format +func convertToAddress(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Validate address format + if strings.HasPrefix(v, "0x") { + // Has 0x prefix - validate hex part is exactly 40 chars + hexPart := v[2:] + if len(hexPart) == 40 { + return "'" + v + "'", nil + } + return "", fmt.Errorf("invalid address format: %s (expected 40 hex chars after 0x)", v) + } + // No 0x prefix - should be exactly 40 hex chars + if len(v) == 40 { + 
return "'0x" + v + "'", nil + } + return "", fmt.Errorf("invalid address format: %s (expected 40 or 42 chars)", v) + case []byte: + if len(v) == 20 { + return "'0x" + hex.EncodeToString(v) + "'", nil + } + return "", fmt.Errorf("invalid address byte length: %d (expected 20)", len(v)) + default: + return "", fmt.Errorf("cannot convert %T to address", value) + } +} + +// convertToHash converts values to PostgreSQL CHAR(66) format +func convertToHash(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Validate hash format + if len(v) == 66 && strings.HasPrefix(v, "0x") { + return "'" + v + "'", nil + } + if len(v) == 64 { + // Add 0x prefix if missing + return "'0x" + v + "'", nil + } + return "", fmt.Errorf("invalid hash format: %s (expected 64 or 66 chars)", v) + case []byte: + if len(v) == 32 { + return "'0x" + hex.EncodeToString(v) + "'", nil + } + return "", fmt.Errorf("invalid hash byte length: %d (expected 32)", len(v)) + default: + return "", fmt.Errorf("cannot convert %T to hash", value) + } +} + +// convertToHexString converts values to hex string format +func convertToHexString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + if strings.HasPrefix(v, "0x") { + return "'" + v + "'", nil + } + // Add 0x prefix if missing + return "'0x" + v + "'", nil + case []byte: + return "'0x" + hex.EncodeToString(v) + "'", nil + default: + return "", fmt.Errorf("cannot convert %T to hex string", value) + } +} + +// convertToDecimal converts values to decimal format +func convertToDecimal(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return "'" + v + "'", nil + case float64, float32: + return fmt.Sprintf("%v", v), nil + case int64, uint64, int32, uint32, int, uint: + return fmt.Sprintf("%v", v), nil + default: + return "", fmt.Errorf("cannot convert %T to decimal", value) + } +} + +// convertToJSONB converts values to PostgreSQL JSONB format +func convertToJSONB(value 
interface{}) (string, error) { + switch v := value.(type) { + case string: + // Assume string is already valid JSON + return "'" + strings.ReplaceAll(v, "'", "''") + "'::jsonb", nil + default: + return "", fmt.Errorf("cannot convert %T to JSONB", value) + } +} + +// convertToUUID converts values to PostgreSQL UUID format +func convertToUUID(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Basic UUID validation (length check) + if len(v) == 36 { + return "'" + v + "'::uuid", nil + } + return "", fmt.Errorf("invalid UUID format: %s (expected 36 chars)", v) + default: + return "", fmt.Errorf("cannot convert %T to UUID", value) + } +} + +// convertUnixTimestamp converts unix timestamps to PostgreSQL timestamp format +func convertUnixTimestamp(value interface{}, isMilliseconds bool) (string, error) { + var t time.Time + + switch v := value.(type) { + case int64: + if isMilliseconds { + t = time.Unix(v/1000, (v%1000)*1000000) + } else { + t = time.Unix(v, 0) + } + case uint64: + if isMilliseconds { + t = time.Unix(int64(v/1000), int64((v%1000)*1000000)) + } else { + t = time.Unix(int64(v), 0) + } + case string: + // Try to parse as number + if val, err := strconv.ParseInt(v, 10, 64); err == nil { + if isMilliseconds { + t = time.Unix(val/1000, (val%1000)*1000000) + } else { + t = time.Unix(val, 0) + } + } else { + return "", fmt.Errorf("cannot parse timestamp string: %s", v) + } + default: + return "", fmt.Errorf("cannot convert %T to timestamp", value) + } + + return "'" + t.UTC().Format(time.RFC3339) + "'", nil +} diff --git a/db_proto/sql/postgres/types_test.go b/db_proto/sql/postgres/types_test.go new file mode 100644 index 0000000..4b0715b --- /dev/null +++ b/db_proto/sql/postgres/types_test.go @@ -0,0 +1,371 @@ +package postgres + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "google.golang.org/protobuf/types/known/timestamppb" +) + 
+func TestValueToString(t *testing.T) { + tests := []struct { + name string + input interface{} + expected string + }{ + // String values + {"simple string", "hello", "'hello'"}, + {"string with quotes", "hello'world", "'hello''world'"}, + {"string with backslash", "hello\\world", "'hello\\\\world'"}, + {"empty string", "", "''"}, + + // Integer values + {"int64", int64(123), "123"}, + {"int64 negative", int64(-456), "-456"}, + {"int32", int32(456), "456"}, + {"int", int(789), "789"}, + + // Unsigned integer values + {"uint64", uint64(123), "'123'"}, + {"uint32", uint32(456), "456"}, + {"uint", uint(789), "789"}, + + // Float values + {"float64", float64(123.45), "123.45"}, + {"float32", float32(67.89), "67.89"}, + + // Boolean values + {"bool true", true, "true"}, + {"bool false", false, "false"}, + + // Byte slice (should be base64 encoded) + {"bytes", []uint8{0xDE, 0xAD, 0xBE, 0xEF}, "'3q2+7w=='"}, + {"empty bytes", []uint8{}, "''"}, + + // Time values + {"time", time.Date(2023, 1, 15, 10, 30, 0, 0, time.UTC), "'2023-01-15T10:30:00Z'"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ValueToString(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestValueToStringTimestamp(t *testing.T) { + // Test protobuf timestamp + testTime := time.Date(2023, 1, 15, 10, 30, 0, 0, time.UTC) + pbTime := timestamppb.New(testTime) + result := ValueToString(pbTime) + assert.Equal(t, "'2023-01-15T10:30:00Z'", result) +} + +func TestValueToStringPanic(t *testing.T) { + // Test unsupported type should panic + assert.Panics(t, func() { + ValueToString(complex64(1 + 2i)) + }) +} + +func TestDataTypeString(t *testing.T) { + tests := []struct { + dataType DataType + expected string + }{ + {TypeNumeric, "NUMERIC"}, + {TypeInteger, "INTEGER"}, + {TypeBool, "BOOLEAN"}, + {TypeBigInt, "BIGINT"}, + {TypeDecimal, "DECIMAL"}, + {TypeDouble, "DOUBLE PRECISION"}, + {TypeText, "TEXT"}, + {TypeBlob, "BLOB"}, + {TypeVarchar, "VARCHAR(255)"}, + 
{TypeTimestamp, "TIMESTAMP"}, + {TypeTimestamptz, "TIMESTAMP WITH TIME ZONE"}, + {TypeJsonb, "JSONB"}, + {TypeUUID, "UUID"}, + {TypeChar, "CHAR"}, + } + + for _, tt := range tests { + t.Run(string(tt.dataType), func(t *testing.T) { + assert.Equal(t, tt.expected, tt.dataType.String()) + }) + } +} + +func TestMapSemanticType(t *testing.T) { + tests := []struct { + name string + semanticType sql2.SemanticType + expectedSQL string + shouldSupport bool + }{ + { + name: "uint256 maps to NUMERIC(78,0)", + semanticType: sql2.SemanticUint256, + expectedSQL: "NUMERIC(78,0)", + shouldSupport: true, + }, + { + name: "int256 maps to NUMERIC(78,0)", + semanticType: sql2.SemanticInt256, + expectedSQL: "NUMERIC(78,0)", + shouldSupport: true, + }, + { + name: "address maps to CHAR(42)", + semanticType: sql2.SemanticAddress, + expectedSQL: "CHAR(42)", + shouldSupport: true, + }, + { + name: "hash maps to CHAR(66)", + semanticType: sql2.SemanticHash, + expectedSQL: "CHAR(66)", + shouldSupport: true, + }, + { + name: "json maps to JSONB", + semanticType: sql2.SemanticJSON, + expectedSQL: "JSONB", + shouldSupport: true, + }, + { + name: "uuid maps to UUID", + semanticType: sql2.SemanticUUID, + expectedSQL: "UUID", + shouldSupport: true, + }, + { + name: "unix_timestamp maps to TIMESTAMP WITH TIME ZONE", + semanticType: sql2.SemanticUnixTimestamp, + expectedSQL: "TIMESTAMP WITH TIME ZONE", + shouldSupport: true, + }, + { + name: "unsupported type", + semanticType: sql2.SemanticType("unsupported"), + expectedSQL: "", + shouldSupport: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sqlType, supported := MapSemanticType(tt.semanticType) + + assert.Equal(t, tt.shouldSupport, supported, "MapSemanticType() supported") + assert.Equal(t, tt.expectedSQL, sqlType, "MapSemanticType() sqlType") + + // Test SupportsSemanticType consistency + assert.Equal(t, tt.shouldSupport, SupportsSemanticType(tt.semanticType), "SupportsSemanticType() consistency") + }) + } 
+} + +func TestConvertToNumeric(t *testing.T) { + tests := []struct { + name string + value interface{} + formatHint string + expected string + shouldError bool + }{ + { + name: "hex string with 0x prefix", + value: "0x1234567890abcdef", + formatHint: "hex", + expected: "1311768467294899695", // converted to decimal + shouldError: false, + }, + { + name: "hex string without 0x prefix with hex hint", + value: "1234567890abcdef", + formatHint: "hex", + expected: "1311768467294899695", // converted to decimal + shouldError: false, + }, + { + name: "decimal string", + value: "123456789012345678901234567890", + formatHint: "decimal", + expected: "'123456789012345678901234567890'", + shouldError: false, + }, + { + name: "byte array", + value: []byte{0x12, 0x34, 0x56, 0x78}, + formatHint: "", + expected: "305419896", // converted to decimal + shouldError: false, + }, + { + name: "int64 value", + value: int64(12345), + formatHint: "", + expected: "12345", + shouldError: false, + }, + { + name: "uint64 value", + value: uint64(12345), + formatHint: "", + expected: "12345", + shouldError: false, + }, + { + name: "unsupported type", + value: float64(123.45), + formatHint: "", + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToNumeric(tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "convertToNumeric() should error") + return + } + + assert.NoError(t, err, "convertToNumeric() should not error") + assert.Equal(t, tt.expected, result, "convertToNumeric() result") + }) + } +} + +func TestConvertToAddress(t *testing.T) { + tests := []struct { + name string + value interface{} + expected string + shouldError bool + }{ + { + name: "valid address with 0x prefix", + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + expected: "'0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e'", + shouldError: false, + }, + { + name: "valid address without 0x prefix", + value: 
"742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + expected: "'0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e'", + shouldError: false, + }, + { + name: "20-byte array", + value: []byte{0x74, 0x2d, 0x35, 0xcc, 0x66, 0x36, 0xc0, 0x53, 0x29, 0x25, 0xa3, 0xb8, 0xd0, 0xa3, 0xe5, 0xa5, 0xf2, 0xd5, 0xde, 0x8e}, + expected: "'0x742d35cc6636c0532925a3b8d0a3e5a5f2d5de8e'", + shouldError: false, + }, + { + name: "invalid address length", + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De", + expected: "", + shouldError: true, + }, + { + name: "invalid byte array length", + value: []byte{0x74, 0x2d, 0x35}, + expected: "", + shouldError: true, + }, + { + name: "unsupported type", + value: 123, + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToAddress(tt.value) + + if tt.shouldError { + assert.Error(t, err, "convertToAddress() should error") + return + } + + assert.NoError(t, err, "convertToAddress() should not error") + assert.Equal(t, tt.expected, result, "convertToAddress() result") + }) + } +} + +func TestConvertSemanticValue(t *testing.T) { + tests := []struct { + name string + semanticType sql2.SemanticType + value interface{} + formatHint string + shouldError bool + }{ + { + name: "uint256 conversion", + semanticType: sql2.SemanticUint256, + value: "0x123456789", + formatHint: "hex", + shouldError: false, + }, + { + name: "address conversion", + semanticType: sql2.SemanticAddress, + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + formatHint: "", + shouldError: false, + }, + { + name: "jsonb conversion", + semanticType: sql2.SemanticJSON, + value: `{"key": "value"}`, + formatHint: "", + shouldError: false, + }, + { + name: "uuid conversion", + semanticType: sql2.SemanticUUID, + value: "550e8400-e29b-41d4-a716-446655440000", + formatHint: "", + shouldError: false, + }, + { + name: "unix timestamp conversion", + semanticType: sql2.SemanticUnixTimestamp, + value: int64(1640995200), + 
formatHint: "", + shouldError: false, + }, + { + name: "fallback to default conversion", + semanticType: sql2.SemanticType("unknown"), + value: "test", + formatHint: "", + shouldError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := ConvertSemanticValue(tt.semanticType, tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "ConvertSemanticValue() should error") + return + } + + assert.NoError(t, err, "ConvertSemanticValue() should not error") + assert.NotEmpty(t, result, "ConvertSemanticValue() should return non-empty result") + }) + } +} \ No newline at end of file diff --git a/db_proto/sql/risingwave/accumulator_inserter.go b/db_proto/sql/risingwave/accumulator_inserter.go new file mode 100644 index 0000000..b156fd9 --- /dev/null +++ b/db_proto/sql/risingwave/accumulator_inserter.go @@ -0,0 +1,147 @@ +package risingwave + +import ( + "database/sql" + "fmt" + "strings" + + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema" + "go.uber.org/zap" +) + +type accumulator struct { + query string + rowValues [][]string +} + +type AccumulatorInserter struct { + accumulators map[string]*accumulator + cursorStmt *sql.Stmt + logger *zap.Logger +} + +func NewAccumulatorInserter(logger *zap.Logger) (*AccumulatorInserter, error) { + logger = logger.Named("risingwave inserter") + + return &AccumulatorInserter{ + logger: logger, + }, nil +} + +func (i *AccumulatorInserter) init(database *Database) error { + tables := database.dialect.GetTables() + accumulators := map[string]*accumulator{} + + for _, table := range tables { + query, err := createInsertFromDescriptorAcc(table, database.dialect) + if err != nil { + return fmt.Errorf("creating insert from descriptor for table %q: %w", table.Name, err) + } + accumulators[table.Name] = &accumulator{ + query: query, + } + } + accumulators["_blocks_"] = &accumulator{ + query: 
fmt.Sprintf("INSERT INTO %s (number, hash, timestamp) VALUES ", tableName(database.schema.Name, "_blocks_")), + } + + // RisingWave doesn't support PostgreSQL's ON CONFLICT syntax + // We'll use a simple INSERT for the cursor, relying on the table to handle conflicts + // The _cursor_ table should be created with appropriate ON CONFLICT behavior + cursorQuery := fmt.Sprintf("INSERT INTO %s (name, cursor) VALUES ($1, $2)", tableName(database.schema.Name, "_cursor_")) + cs, err := database.db.Prepare(cursorQuery) + if err != nil { + return fmt.Errorf("preparing statement %q: %w", cursorQuery, err) + } + + i.accumulators = accumulators + i.cursorStmt = cs + + return nil +} + +func createInsertFromDescriptorAcc(table *schema.Table, dialect sql2.Dialect) (string, error) { + tableName := dialect.FullTableName(table) + fields := table.Columns + + var fieldNames []string + + // Add standard block metadata columns + fieldNames = append(fieldNames, "block_number") + fieldNames = append(fieldNames, "block_timestamp") + + if pk := table.PrimaryKey; pk != nil { + fieldNames = append(fieldNames, pk.Name) + } + + if table.ChildOf != nil { + fieldNames = append(fieldNames, table.ChildOf.ParentTableField) + } + + for _, field := range fields { + if table.PrimaryKey != nil && field.Name == table.PrimaryKey.Name { + continue + } + + if field.IsRepeated || field.IsExtension { + continue + } + fieldNames = append(fieldNames, field.QuotedName()) + } + + return fmt.Sprintf("INSERT INTO %s (%s) VALUES ", + tableName, + strings.Join(fieldNames, ", "), + ), nil +} + +func (i *AccumulatorInserter) insert(table string, values []any, database *Database) error { + var v []string + if table == "_cursor_" { + stmt := database.wrapInsertStatement(i.cursorStmt) + _, err := stmt.Exec(values...) 
+ if err != nil { + return fmt.Errorf("executing insert: %w", err) + } + return nil + } + for _, value := range values { + v = append(v, ValueToString(value)) + } + accumulator := i.accumulators[table] + if accumulator == nil { + return fmt.Errorf("accumulator not found for table %q", table) + } + accumulator.rowValues = append(accumulator.rowValues, v) + + return nil +} + +func (i *AccumulatorInserter) flush(database *Database) error { + for _, acc := range i.accumulators { + if len(acc.rowValues) == 0 { + continue + } + var b strings.Builder + b.WriteString(acc.query) + for _, values := range acc.rowValues { + b.WriteString("(") + b.WriteString(strings.Join(values, ",")) + b.WriteString("),") + } + insert := strings.Trim(b.String(), ",") + + _, err := database.execSql(insert) + if err != nil { + shortInsert := insert + if len(insert) > 256 { + shortInsert = insert[:256] + "..." + } + return fmt.Errorf("risingwave accumulator inserter: executing insert %s: %w", shortInsert, err) + } + acc.rowValues = acc.rowValues[:0] + } + + return nil +} diff --git a/db_proto/sql/risingwave/database.go b/db_proto/sql/risingwave/database.go new file mode 100644 index 0000000..5727434 --- /dev/null +++ b/db_proto/sql/risingwave/database.go @@ -0,0 +1,452 @@ +package risingwave + +import ( + "context" + pqsql "database/sql" + "fmt" + "hash/fnv" + "time" + + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/dynamic" + sink "github.com/streamingfast/substreams-sink" + "github.com/streamingfast/substreams-sink-sql/db_changes/db" + "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema" + "go.uber.org/zap" +) + +// Database represents a RisingWave database connection. +// Important: RisingWave does not support read-write transactions and operates in autocommit mode. 
+// This means: +// - All data modifications are immediately committed +// - No rollback capability for individual operations +// - ACID transaction semantics are not available +// - Suitable for streaming/append-only workloads +type Database struct { + *sql.BaseDatabase + db *pqsql.DB + tx *pqsql.Tx // Always nil for RisingWave due to autocommit mode + schema *schema.Schema + logger *zap.Logger + dialect *DialectRisingwave + inserter pgInserter + flusher pgFlusher + useConstraints bool +} + +func NewDatabase(schema *schema.Schema, dsn *db.DSN, moduleOutputType string, rootMessageDescriptor *desc.MessageDescriptor, useProtoOptions bool, useConstraints bool, logger *zap.Logger) (*Database, error) { + logger = logger.Named("risingwave") + + connectionString := dsn.ConnString() + logger.Info("connecting to db", zap.String("dsn", connectionString)) + logger.Info("RisingWave operates in autocommit mode - no transaction semantics available") + sqlDB, err := pqsql.Open(dsn.SqlDriver(), connectionString) + if err != nil { + return nil, fmt.Errorf("open db connection: %w", err) + } + + if reachable, err := isDatabaseReachable(sqlDB); !reachable { + return nil, fmt.Errorf("database not reachable: %w", err) + } + + dialect, err := NewDialectRisingwave(schema.Name, schema.TableRegistry, logger) + if err != nil { + return nil, fmt.Errorf("creating risingwave dialect: %w", err) + } + + baseDB, err := sql.NewBaseDatabase(moduleOutputType, rootMessageDescriptor, useProtoOptions, logger) + if err != nil { + return nil, fmt.Errorf("failed to create base database: %w", err) + } + database := &Database{ + db: sqlDB, + schema: schema, + useConstraints: useConstraints, + BaseDatabase: baseDB, + dialect: dialect, + logger: logger, + } + + return database, nil +} + +func (d *Database) Open() error { + if d.useConstraints { + inserter, err := NewRowInserter(d.logger) + if err != nil { + return fmt.Errorf("creating row inserter: %w", err) + } + if err := inserter.init(d); err != nil { + 
return fmt.Errorf("initializing row inserter: %w", err) + } + d.inserter = inserter + d.flusher = inserter + } else { + inserter, err := NewAccumulatorInserter(d.logger) + if err != nil { + return fmt.Errorf("creating accumulator inserter: %w", err) + } + if err := inserter.init(d); err != nil { + return fmt.Errorf("initializing accumulator inserter: %w", err) + } + d.inserter = inserter + d.flusher = inserter + } + return nil +} + +func (d *Database) GetDialect() sql.Dialect { + return d.dialect +} + +func (d *Database) CreateDatabase(useConstraints bool) error { + err := d.createDatabase() + if err != nil { + return fmt.Errorf("creating database: %w", err) + } + + if useConstraints { + err = d.applyConstraints() + if err != nil { + return fmt.Errorf("applying constraints: %w", err) + } + } + + return nil +} + +func (d *Database) createDatabase() error { + staticSql := fmt.Sprintf(risingwaveStaticSql, d.schema.Name, d.schema.Name, d.schema.Name, d.schema.Name) + _, err := d.execSql(staticSql) + if err != nil { + return fmt.Errorf("executing static staticSql: %w\n%s", err, staticSql) + } + + for _, statement := range d.dialect.CreateTableSql { + d.logger.Info("executing create statement", zap.String("sql", statement)) + _, err := d.execSql(statement) + if err != nil { + return fmt.Errorf("executing create statement: %w %s", err, statement) + } + } + return nil +} + +func (d *Database) applyConstraints() error { + startAt := time.Now() + for _, constraint := range d.dialect.PrimaryKeySql { + d.logger.Info("executing pk statement", zap.String("sql", constraint.Sql)) + _, err := d.execSql(constraint.Sql) + if err != nil { + return fmt.Errorf("executing pk statement: %w %s", err, constraint.Sql) + } + } + for _, constraint := range d.dialect.UniqueConstraintSql { + d.logger.Info("executing unique statement", zap.String("sql", constraint.Sql)) + _, err := d.execSql(constraint.Sql) + if err != nil { + return fmt.Errorf("executing unique statement: %w %s", err, 
constraint.Sql)
		}
	}
	for _, constraint := range d.dialect.ForeignKeySql {
		d.logger.Info("executing fk constraint statement", zap.String("sql", constraint.Sql))
		_, err := d.execSql(constraint.Sql)
		if err != nil {
			return fmt.Errorf("executing fk constraint statement: %w %s", err, constraint.Sql)
		}
	}
	d.logger.Info("applying constraints", zap.Duration("duration", time.Since(startAt)))
	return nil
}

// BeginTransaction is a no-op for RisingWave.
func (d *Database) BeginTransaction() (err error) {
	// RisingWave does not support read-write transactions. According to RisingWave docs:
	// "The BEGIN command starts the read-write transaction mode, which is not supported yet in RisingWave.
	// For compatibility reasons, this command will still succeed but no transaction is actually started."
	//
	// Since no actual transaction is started, we operate in autocommit mode and set tx to nil
	// to ensure all subsequent operations use the database connection directly.
	d.logger.Debug("RisingWave: skipping transaction begin, using autocommit mode")
	d.tx = nil
	return nil
}

// CommitTransaction is a no-op for RisingWave (autocommit mode).
func (d *Database) CommitTransaction() (err error) {
	// RisingWave operates in autocommit mode since read-write transactions are not supported.
	// All changes are automatically committed when executed.
	d.logger.Debug("RisingWave: commit is no-op in autocommit mode")

	// Defensive check: if somehow a transaction was started (shouldn't happen), commit it
	if d.tx != nil {
		d.logger.Warn("RisingWave: unexpected transaction found during commit, attempting to commit")
		err = d.tx.Commit()
		if err != nil {
			return fmt.Errorf("committing unexpected transaction: %w", err)
		}
		d.tx = nil
	}
	return nil
}

// RollbackTransaction is a no-op for RisingWave (autocommit mode).
func (d *Database) RollbackTransaction() {
	// RisingWave operates in autocommit mode and does not support traditional rollback.
	// In streaming databases, data modifications are typically append-only.
	// ROLLBACK documentation was not found for RisingWave, suggesting it may not be supported.
	d.logger.Debug("RisingWave: rollback is no-op in autocommit mode")

	// Defensive check: if somehow a transaction was started (shouldn't happen), attempt rollback
	if d.tx != nil {
		d.logger.Warn("RisingWave: unexpected transaction found during rollback, attempting to rollback")
		err := d.tx.Rollback()
		if err != nil {
			// Log error but don't panic since RisingWave may not support rollback
			d.logger.Error("RisingWave: rollback failed on unexpected transaction", zap.Error(err))
		}
		d.tx = nil
	}
}

// wrapInsertStatement rebinds a prepared statement to the active transaction
// when one exists; for RisingWave tx is expected to always be nil.
func (d *Database) wrapInsertStatement(stmt *pqsql.Stmt) *pqsql.Stmt {
	if d.tx != nil {
		// This should not happen for RisingWave, but handle defensively
		d.logger.Warn("RisingWave: unexpected transaction found when wrapping statement")
		stmt = d.tx.Stmt(stmt)
	}
	return stmt
}

// execSql executes SQL using the database connection in autocommit mode.
// RisingWave operates in autocommit mode, so we always use the direct database connection.
func (d *Database) execSql(query string, args ...any) (pqsql.Result, error) {
	if d.tx != nil {
		// This should not happen for RisingWave since we never create transactions
		d.logger.Warn("RisingWave: unexpected transaction found during SQL execution, using transaction")
		return d.tx.Exec(query, args...)
	}
	return d.db.Exec(query, args...)
}

// Insert routes one row to the configured inserter.
func (d *Database) Insert(table string, values []any) error {
	return d.inserter.insert(table, values, d)
}

// WalkMessageDescriptorAndInsert walks the dynamic message and inserts all rows
// using this database's dialect.
func (d *Database) WalkMessageDescriptorAndInsert(dm *dynamic.Message, blockNum uint64, blockTimestamp time.Time, parent *sql.Parent) (time.Duration, error) {
	return d.BaseDatabase.WalkMessageDescriptorAndInsertWithDialect(dm, blockNum, blockTimestamp, parent, d.dialect, d)
}

// InsertBlock records block metadata in the _blocks_ table.
func (d *Database) InsertBlock(blockNum uint64, hash string, timestamp time.Time) error {
	d.logger.Debug("inserting _blocks_", zap.Uint64("block_num", blockNum), zap.String("block_hash", hash))
	err := d.inserter.insert("_blocks_", []any{blockNum, hash, timestamp}, d)
	if err != nil {
		return fmt.Errorf("inserting block %d: %w", blockNum, err)
	}

	return nil
}

// Flush drains the configured flusher and returns how long it took.
func (d *Database) Flush() (time.Duration, error) {
	startFlush := time.Now()
	err := d.flusher.flush(d)
	if err != nil {
		return 0, fmt.Errorf("flushing: %w", err)
	}
	return time.Since(startFlush), nil
}

// FetchSinkInfo returns the stored schema hash, or nil when the _sink_info_
// table does not exist yet.
func (d *Database) FetchSinkInfo(schemaName string) (*sql.SinkInfo, error) {
	// Bind the schema name as a parameter rather than interpolating it into
	// the SQL text; it is a value here, not an identifier.
	query := "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = $1 AND table_name = '_sink_info_')"

	var exist bool
	err := d.db.QueryRow(query, schemaName).Scan(&exist)
	if err != nil {
		return nil, fmt.Errorf("checking if sync_info table exists: %w", err)
	}
	if !exist {
		return nil, nil
	}

	out := &sql.SinkInfo{}

	// Identifiers (schema names) cannot be bound as parameters, so Sprintf is
	// required for the table reference below.
	err = d.db.QueryRow(fmt.Sprintf("SELECT schema_hash FROM %s._sink_info_", d.schema.Name)).Scan(&out.SchemaHash)
	if err != nil {
		return nil, fmt.Errorf("fetching sync info: %w", err)
	}
	return out, nil
}

// StoreSinkInfo persists the schema hash for later comparison.
func (d *Database) StoreSinkInfo(schemaName string, schemaHash string) error {
	_, err := d.execSql(fmt.Sprintf("INSERT INTO %s._sink_info_ (schema_hash) VALUES ($1)", schemaName), schemaHash)
	if err != nil {
		return fmt.Errorf("storing schema hash: %w", err)
	}
	return nil
}

func (d *Database) UpdateSinkInfoHash(schemaName
string, newHash string) error { + _, err := d.execSql(fmt.Sprintf("UPDATE %s._sink_info_ SET schema_hash = $1", schemaName), newHash) + if err != nil { + return fmt.Errorf("updating schema hash: %w", err) + } + return nil +} + +func (d *Database) FetchCursor() (*sink.Cursor, error) { + query := fmt.Sprintf("SELECT cursor FROM %s WHERE name = $1", tableName(d.schema.Name, "_cursor_")) + + rows, err := d.db.Query(query, "cursor") + if err != nil { + return nil, fmt.Errorf("selecting cursor: %w", err) + } + defer rows.Close() + + if rows.Next() { + var cursor string + err = rows.Scan(&cursor) + + return sink.NewCursor(cursor) + } + return nil, nil +} + +func (d *Database) StoreCursor(cursor *sink.Cursor) error { + err := d.inserter.insert("_cursor_", []any{"cursor", cursor.String()}, d) + if err != nil { + return fmt.Errorf("inserting cursor: %w", err) + } + + return err +} + +func (d *Database) HandleBlocksUndo(lastValidBlockNum uint64) (err error) { + // RisingWave operates in autocommit mode - execute operations directly without transactions + d.logger.Info("undoing blocks", zap.Uint64("last_valid_block_num", lastValidBlockNum)) + + query := fmt.Sprintf(`DELETE FROM %s._blocks_ WHERE "number" > $1`, d.schema.Name) + result, err := d.execSql(query, lastValidBlockNum) + if err != nil { + return fmt.Errorf("deleting block from %d: %w", lastValidBlockNum, err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("fetching rows affected: %w", err) + } + d.logger.Info("undo completed", zap.Int64("row_affected", rowsAffected)) + + return nil +} + +func (d *Database) Clone() sql.Database { + base := d.BaseClone() + d.BaseDatabase = base + return d +} + +func (d *Database) DatabaseHash(schemaName string) (uint64, error) { + query := ` +SELECT + c.table_name, + c.column_name, + c.is_nullable, + c.data_type, + c.character_maximum_length, + c.numeric_precision, + c.numeric_precision_radix, + c.numeric_scale, + c.datetime_precision, + 
c.interval_precision, + c.is_generated, + c.is_updatable, + tc.constraint_name, + tc.table_name, + tc.constraint_type, + kcu.column_name, + kcu.table_name, + kcu.column_name, + ccu.constraint_name, + ccu.table_name, + ccu.column_name +FROM + information_schema.columns c + LEFT JOIN + information_schema.constraint_column_usage ccu + ON c.table_name = ccu.table_name + AND c.column_name = ccu.column_name + AND c.table_schema = ccu.table_schema + LEFT JOIN + information_schema.key_column_usage kcu + ON ccu.constraint_name = kcu.constraint_name + AND c.table_schema = kcu.table_schema + LEFT JOIN + information_schema.table_constraints tc + ON kcu.constraint_name = tc.constraint_name + AND kcu.table_schema = tc.table_schema +WHERE + c.table_schema = '%s' +ORDER BY + c.table_name, + c.column_name, + tc.table_name, + tc.constraint_name, + kcu.table_name, + kcu.column_name, + kcu.constraint_name; +` + + query = fmt.Sprintf(query, schemaName) + + rows, err := d.db.Query(query) + if err != nil { + return 0, fmt.Errorf("executing query to compute schema hash: %w", err) + } + defer rows.Close() + + h := fnv.New64a() + columns, err := rows.Columns() + if err != nil { + return 0, fmt.Errorf("fetching columns for hashing: %w", err) + } + + values := make([]interface{}, len(columns)) + valuePtrs := make([]interface{}, len(columns)) + for i := range values { + valuePtrs[i] = &values[i] + } + + for rows.Next() { + err = rows.Scan(valuePtrs...) 
+ if err != nil { + return 0, fmt.Errorf("scanning row for hashing: %w", err) + } + + for _, val := range values { + var str string + if val != nil { + str = fmt.Sprintf("%v", val) + } + _, err = h.Write([]byte(str)) + if err != nil { + return 0, fmt.Errorf("hashing value %q: %w", str, err) + } + } + } + + if err = rows.Err(); err != nil { + return 0, fmt.Errorf("iterating rows: %w", err) + } + + return h.Sum64(), nil +} + +func isDatabaseReachable(db *pqsql.DB) (bool, error) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := db.PingContext(ctx) + if err != nil { + return false, err + } + return true, nil +} \ No newline at end of file diff --git a/db_proto/sql/risingwave/database_test.go b/db_proto/sql/risingwave/database_test.go new file mode 100644 index 0000000..583ff25 --- /dev/null +++ b/db_proto/sql/risingwave/database_test.go @@ -0,0 +1,54 @@ +package risingwave + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// Test the tableName helper function that combines schema and table names +func TestTableName(t *testing.T) { + tests := []struct { + schema string + table string + expected string + }{ + {"public", "users", "public.users"}, + {"test_schema", "test_table", "test_schema.test_table"}, + {"my_schema", "_blocks_", "my_schema._blocks_"}, + {"", "table", ".table"}, // Edge case: empty schema + } + + for _, test := range tests { + t.Run(test.expected, func(t *testing.T) { + result := tableName(test.schema, test.table) + assert.Equal(t, test.expected, result) + }) + } +} + +// Test value conversion for RisingWave-specific handling +func TestValueConversion(t *testing.T) { + tests := []struct { + name string + input interface{} + expected string + }{ + {"string", "hello", "'hello'"}, + {"string with quotes", "it's", "'it''s'"}, + {"int64", int64(123), "123"}, + {"uint64", uint64(456), "456"}, + {"bool true", true, "true"}, + {"bool false", false, "false"}, + {"time", 
time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC), "'2023-01-01T00:00:00Z'"},
		{"bytes", []byte{0xDE, 0xAD}, "'\\xDEAD'"},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result := ValueToString(test.input)
			assert.Equal(t, test.expected, result)
		})
	}
}
\ No newline at end of file
diff --git a/db_proto/sql/risingwave/dialect.go b/db_proto/sql/risingwave/dialect.go
new file mode 100644
index 0000000..fc04beb
--- /dev/null
+++ b/db_proto/sql/risingwave/dialect.go
@@ -0,0 +1,292 @@
package risingwave

import (
	"database/sql"
	"encoding/hex"
	"fmt"
	"hash/fnv"
	"sort"
	"strings"

	sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql"
	"github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema"
	"go.uber.org/zap"
)

// risingwaveStaticSql bootstraps the sink's bookkeeping tables. It takes the
// schema name four times (Sprintf'd by callers): once for CREATE SCHEMA and
// once per table. The _cursor_ table uses RisingWave's ON CONFLICT OVERWRITE
// so plain INSERTs act as upserts for the cursor row.
const risingwaveStaticSql = `
	CREATE SCHEMA IF NOT EXISTS "%s";

	CREATE TABLE IF NOT EXISTS "%s"._sink_info_ (
	    schema_hash VARCHAR PRIMARY KEY
	);

	CREATE TABLE IF NOT EXISTS "%s"._cursor_ (
	    name VARCHAR PRIMARY KEY,
	    cursor VARCHAR NOT NULL
	) ON CONFLICT OVERWRITE;

	CREATE TABLE IF NOT EXISTS "%s"._blocks_ (
	    number INTEGER PRIMARY KEY,
	    hash VARCHAR NOT NULL,
	    timestamp TIMESTAMP WITH TIME ZONE NOT NULL
	);
`

// DialectRisingwave generates RisingWave-flavored DDL from the table registry.
type DialectRisingwave struct {
	*sql2.BaseDialect
	schemaName string
}

// NewDialectRisingwave builds the dialect and eagerly generates a CREATE TABLE
// statement for every table in the registry.
func NewDialectRisingwave(schemaName string, tableRegistry map[string]*schema.Table, logger *zap.Logger) (*DialectRisingwave, error) {
	d := &DialectRisingwave{
		BaseDialect: sql2.NewBaseDialect(tableRegistry, logger),
		schemaName:  schemaName,
	}

	err := d.init()
	if err != nil {
		return nil, fmt.Errorf("initializing dialect: %w", err)
	}

	// NOTE(review): map iteration order is random, so CreateTableSql is
	// populated in nondeterministic order — SchemaHash sorts before hashing,
	// which compensates for this.
	for _, table := range tableRegistry {
		err := d.createTable(table)
		if err != nil {
			return nil, fmt.Errorf("handling table %q: %w", table.Name, err)
		}
	}

	return d, nil
}

// UseVersionField reports that RisingWave tables carry no version column.
func (d *DialectRisingwave) UseVersionField() bool {
	return false
}

// UseDeletedField reports that RisingWave tables carry no deleted column.
func (d *DialectRisingwave) UseDeletedField() bool {
	return false
}

// init is currently a no-op hook kept for parity with other dialects.
func (d *DialectRisingwave) init() error {
	return nil
}

// createTable builds one CREATE TABLE IF NOT EXISTS statement for the given
// table and registers it via AddCreateTableSql. Column order is: explicit
// primary key, block metadata, parent key (for child tables), then regular
// protobuf columns. Foreign keys are validated but not emitted as constraints
// because RisingWave does not support them.
func (d *DialectRisingwave) createTable(table *schema.Table) error {
	var sb strings.Builder
	addedColumns := make(map[string]struct{})

	tableName := d.FullTableName(table)

	sb.WriteString(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (", tableName))

	// Add primary key if it exists
	var primaryKeyFieldName string
	if table.PrimaryKey != nil {
		pk := table.PrimaryKey
		primaryKeyFieldName = pk.Name
		sb.WriteString(fmt.Sprintf("%s %s PRIMARY KEY,", pk.Name, MapFieldType(pk.FieldDescriptor)))
		addedColumns[pk.Name] = struct{}{}
	}

	// Always add block metadata columns
	sb.WriteString(" block_number INTEGER NOT NULL,")
	sb.WriteString(" block_timestamp TIMESTAMP WITH TIME ZONE NOT NULL,")
	addedColumns["block_number"] = struct{}{}
	addedColumns["block_timestamp"] = struct{}{}

	// Add parent key for child tables
	var parentKeyColumns []string
	if table.ChildOf != nil {
		parentTable, parentFound := d.TableRegistry[table.ChildOf.ParentTable]
		if !parentFound {
			return fmt.Errorf("parent table %q not found", table.ChildOf.ParentTable)
		}
		fieldFound := false
		for _, parentField := range parentTable.Columns {
			if parentField.Name == table.ChildOf.ParentTableField {
				if _, exists := addedColumns[parentField.Name]; !exists {
					sb.WriteString(fmt.Sprintf("%s %s NOT NULL,", parentField.Name, MapFieldType(parentField.FieldDescriptor)))
					addedColumns[parentField.Name] = struct{}{}
					parentKeyColumns = append(parentKeyColumns, parentField.Name)
				}
				fieldFound = true
				break
			}
		}
		if !fieldFound {
			return fmt.Errorf("field %q not found in table %q", table.ChildOf.ParentTableField, table.ChildOf.ParentTable)
		}
	}

	// Add all regular columns from the protobuf message
	for _, f := range table.Columns {
		// Skip if already added
		if _, exists := addedColumns[f.Name]; exists {
			continue
		}

		// Skip primary key (already handled above)
		if f.Name == primaryKeyFieldName {
			continue
		}

		fieldQuotedName := f.QuotedName()

		// Skip repeated fields (not supported in SQL)
		if f.IsRepeated {
			continue
		}

		// Skip message fields that don't map to simple columns
		if f.IsMessage && !IsWellKnownType(f.FieldDescriptor) {
			continue
		}

		// Handle foreign key fields (but don't add constraints since RisingWave doesn't support them)
		// The lookup below only validates that the referenced table/field exist.
		if f.ForeignKey != nil {
			foreignTable, found := d.TableRegistry[f.ForeignKey.Table]
			if !found {
				return fmt.Errorf("foreign table %q not found", f.ForeignKey.Table)
			}

			var foreignField *schema.Column
			for _, field := range foreignTable.Columns {
				if field.Name == f.ForeignKey.TableField {
					foreignField = field
					break
				}
			}
			if foreignField == nil {
				return fmt.Errorf("foreign field %q not found in table %q", f.ForeignKey.TableField, f.ForeignKey.Table)
			}
		}

		// Determine field type
		fieldType := MapFieldType(f.FieldDescriptor)
		if f.IsUnique {
			fieldType = fieldType + " UNIQUE"
		}

		// Add the column
		sb.WriteString(fmt.Sprintf("%s %s", fieldQuotedName, fieldType))
		sb.WriteString(",")
		addedColumns[f.Name] = struct{}{}
	}

	// Add composite primary key if no explicit primary key exists
	if table.PrimaryKey == nil {
		// Remove the last comma before adding primary key constraint
		temp := sb.String()
		temp = temp[:len(temp)-1]
		sb = strings.Builder{}
		sb.WriteString(temp)

		// Build composite primary key: always include block_number, then parent keys if any
		var pkColumns []string
		pkColumns = append(pkColumns, "block_number")
		pkColumns = append(pkColumns, parentKeyColumns...)

		// Create the primary key constraint
		sb.WriteString(fmt.Sprintf(", PRIMARY KEY (%s)", strings.Join(pkColumns, ", ")))
	} else {
		// Remove the last comma for tables with explicit primary key
		temp := sb.String()
		temp = temp[:len(temp)-1]
		sb = strings.Builder{}
		sb.WriteString(temp)
	}

	sb.WriteString("\n);\n")

	d.AddCreateTableSql(table.Name, sb.String())

	return nil
}

// CreateDatabase executes the static bootstrap SQL and all generated CREATE
// TABLE statements inside the provided transaction.
// NOTE(review): RisingWave does not honor read-write transactions, so the tx
// here behaves like autocommit — confirm this path is still used.
func (d *DialectRisingwave) CreateDatabase(tx *sql.Tx) error {
	staticSql := fmt.Sprintf(risingwaveStaticSql, d.schemaName, d.schemaName, d.schemaName, d.schemaName)
	_, err := tx.Exec(staticSql)
	if err != nil {
		return fmt.Errorf("executing static staticSql: %w\n%s", err, staticSql)
	}

	for _, statement := range d.CreateTableSql {
		d.Logger.Info("executing create statement", zap.String("sql", statement))
		_, err := tx.Exec(statement)
		if err != nil {
			return fmt.Errorf("executing create statement: %w %s", err, statement)
		}
	}
	return nil
}

// FullTableName returns the schema-qualified name of a table.
func (d *DialectRisingwave) FullTableName(table *schema.Table) string {
	return tableName(d.schemaName, table.Name)
}

// SchemaHash returns a hex FNV-64a digest over all generated DDL (create
// tables, then PK, FK and unique constraints), each group sorted so the hash
// is stable despite map iteration order.
// todo: move to postgress database
func (d *DialectRisingwave) SchemaHash() string {
	h := fnv.New64a()

	var buf []byte

	// SchemaHash tableCreateStatements
	var sqls []string
	for _, sql := range d.CreateTableSql {
		sqls = append(sqls, sql)
		//buf = append(buf, []byte(sql)...)
	}

	sort.Strings(sqls)
	for _, sql := range sqls {
		buf = append(buf, []byte(sql)...)
	}

	var pk []string
	for _, constraint := range d.PrimaryKeySql {
		pk = append(pk, constraint.Sql)
	}
	sort.Strings(pk)
	for _, constraint := range pk {
		buf = append(buf, []byte(constraint)...)
	}

	var fk []string
	for _, constraint := range d.ForeignKeySql {
		fk = append(fk, constraint.Sql)
	}
	sort.Strings(fk)
	for _, constraint := range fk {
		buf = append(buf, []byte(constraint)...)
	}

	var uniques []string
	for _, constraint := range d.UniqueConstraintSql {
		uniques = append(uniques, constraint.Sql)
	}
	sort.Strings(uniques)
	for _, constraint := range uniques {
		buf = append(buf, []byte(constraint)...)
	}

	//todo: hum... is this useful?
	//var accumulators []string
	//for _, sql := range d.InsertSql {
	//	accumulators = append(accumulators, sql)
	//}
	//sort.Strings(accumulators)
	//for _, sql := range accumulators {
	//	buf = append(buf, []byte(sql)...)
	//}

	// hash.Hash.Write is documented to never return an error; panic keeps the
	// signature simple.
	_, err := h.Write(buf)
	if err != nil {
		panic("unable to write to hash")
	}

	data := h.Sum(nil)
	return hex.EncodeToString(data)
}

// tableName joins a schema and table name with a dot.
func tableName(schemaName string, tableName string) string {
	return fmt.Sprintf("%s.%s", schemaName, tableName)
}
diff --git a/db_proto/sql/risingwave/dialect_test.go b/db_proto/sql/risingwave/dialect_test.go
new file mode 100644
index 0000000..5923211
--- /dev/null
+++ b/db_proto/sql/risingwave/dialect_test.go
@@ -0,0 +1,793 @@
package risingwave

import (
	"strings"
	"testing"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
	"github.com/jhump/protoreflect/desc"
	"github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

func TestDialectRisingwave_UseVersionField(t *testing.T) {
	d := &DialectRisingwave{}
	assert.False(t, d.UseVersionField())
}

func TestDialectRisingwave_UseDeletedField(t *testing.T) {
	d := &DialectRisingwave{}
	assert.False(t, d.UseDeletedField())
}

func TestDialectRisingwave_FullTableName(t *testing.T) {
	d := &DialectRisingwave{schemaName: "public"}
	table := &schema.Table{Name: "users"}

	expected := "public.users"
	actual := d.FullTableName(table)

	assert.Equal(t, expected, actual)
}

func TestDialectRisingwave_SchemaHash(t *testing.T) {
	logger := zap.NewNop()

	// Create two identical dialects
	d1, err :=
NewDialectRisingwave("test_schema", map[string]*schema.Table{}, logger)
	require.NoError(t, err)

	d2, err := NewDialectRisingwave("test_schema", map[string]*schema.Table{}, logger)
	require.NoError(t, err)

	// Their schema hashes should be identical
	assert.Equal(t, d1.SchemaHash(), d2.SchemaHash())

	// With empty table registry, hash should be consistent
	// Note: Schema name doesn't affect hash, only table structures do
	assert.NotEmpty(t, d1.SchemaHash(), "Schema hash should not be empty")
}

func TestDialectRisingwave_Init(t *testing.T) {
	logger := zap.NewNop()
	d, err := NewDialectRisingwave("test_schema", map[string]*schema.Table{}, logger)
	require.NoError(t, err)

	// RisingWave dialect uses inline constraints, so PrimaryKeySql should be empty
	// Primary keys are defined directly in CREATE TABLE statements
	assert.Equal(t, 0, len(d.PrimaryKeySql), "RisingWave should not use ALTER TABLE for primary keys")

	// With empty table registry, no user-defined CREATE TABLE statements should be generated
	// System tables (_blocks_, _cursor_, etc.) are handled via static SQL in CreateDatabase method
	assert.Equal(t, 0, len(d.CreateTableSql), "No CREATE TABLE statements should be generated with empty table registry")

	// Verify that static SQL is properly formatted and contains system tables
	assert.NotEmpty(t, risingwaveStaticSql, "Static SQL should not be empty")
	assert.Contains(t, risingwaveStaticSql, "_blocks_", "Static SQL should contain _blocks_ table")
	assert.Contains(t, risingwaveStaticSql, "_cursor_", "Static SQL should contain _cursor_ table")
	assert.Contains(t, risingwaveStaticSql, "_sink_info_", "Static SQL should contain _sink_info_ table")
}

func TestDialectRisingwave_CreateTableStaticSql(t *testing.T) {
	// Test that the static SQL contains expected RisingWave-specific elements
	sql := strings.ToLower(risingwaveStaticSql)

	// Check schema creation
	assert.Contains(t, sql, "create schema if not exists")

	// Check _sink_info_ table
	assert.Contains(t, sql, "_sink_info_")
	assert.Contains(t, sql, "schema_hash varchar primary key")

	// Check _cursor_ table
	assert.Contains(t, sql, "_cursor_")
	assert.Contains(t, sql, "name varchar primary key")
	assert.Contains(t, sql, "cursor varchar not null")
	assert.Contains(t, sql, "on conflict overwrite", "RisingWave should use ON CONFLICT OVERWRITE")

	// Check _blocks_ table
	assert.Contains(t, sql, "_blocks_")
	assert.Contains(t, sql, "number integer")
	assert.Contains(t, sql, "hash varchar not null")
	assert.Contains(t, sql, "timestamp timestamp with time zone not null")
}

func TestDialectRisingwave_CreateTable_SimpleTable(t *testing.T) {
	logger := zap.NewNop()

	// Create mock field descriptors
	stringField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)
	intField := createMockFieldDescriptor("age", descriptor.FieldDescriptorProto_TYPE_INT32)

	table := &schema.Table{
		Name: "users",
		Columns: []*schema.Column{
			{Name: "name", FieldDescriptor: stringField},
			{Name: "age", FieldDescriptor: intField},
		},
	}

	tableRegistry := map[string]*schema.Table{"users": table}
	d, err := NewDialectRisingwave("public", tableRegistry, logger)
	require.NoError(t, err)

	// Should have one CREATE TABLE statement
	assert.Equal(t, 1, len(d.CreateTableSql))

	sql := d.CreateTableSql["users"]
	assert.Contains(t, sql, "CREATE TABLE IF NOT EXISTS public.users")
	assert.Contains(t, sql, "block_number INTEGER NOT NULL")
	assert.Contains(t, sql, "block_timestamp TIMESTAMP WITH TIME ZONE NOT NULL")
	assert.Contains(t, sql, `"name" CHARACTER VARYING`)
	assert.Contains(t, sql, `"age" INTEGER`)

	// Should not contain any foreign key constraints
	assert.NotContains(t, sql, "FOREIGN KEY")
	assert.NotContains(t, sql, "REFERENCES")
}

func TestDialectRisingwave_CreateTable_WithPrimaryKey(t *testing.T) {
	logger := zap.NewNop()

	idField := createMockFieldDescriptor("id", descriptor.FieldDescriptorProto_TYPE_STRING)
	nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)

	table := &schema.Table{
		Name: "users",
		PrimaryKey: &schema.PrimaryKey{
			Name:            "id",
			FieldDescriptor: idField,
		},
		Columns: []*schema.Column{
			{Name: "id", FieldDescriptor: idField, IsPrimaryKey: true},
			{Name: "name", FieldDescriptor: nameField},
		},
	}

	tableRegistry := map[string]*schema.Table{"users": table}
	d, err := NewDialectRisingwave("public", tableRegistry, logger)
	require.NoError(t, err)

	sql := d.CreateTableSql["users"]
	assert.Contains(t, sql, "id CHARACTER VARYING PRIMARY KEY")
	assert.Contains(t, sql, `"name" CHARACTER VARYING`)

	// Primary key should not be duplicated
	assert.Equal(t, 1, strings.Count(sql, "id CHARACTER VARYING"))
}

func TestDialectRisingwave_CreateTable_ChildTable(t *testing.T) {
	logger := zap.NewNop()

	// Parent table
	parentIdField := createMockFieldDescriptor("instruction_id", descriptor.FieldDescriptorProto_TYPE_STRING)
	parentTable := &schema.Table{
		Name: "instructions",
		PrimaryKey: &schema.PrimaryKey{
			Name:            "instruction_id",
			FieldDescriptor: parentIdField,
		},
		Columns: []*schema.Column{
			{Name: "instruction_id", FieldDescriptor: parentIdField, IsPrimaryKey: true},
		},
	}

	// Child table
	amountField := createMockFieldDescriptor("amount", descriptor.FieldDescriptorProto_TYPE_UINT64)
	childTable := &schema.Table{
		Name: "mints",
		ChildOf: &schema.ChildOf{
			ParentTable:      "instructions",
			ParentTableField: "instruction_id",
		},
		Columns: []*schema.Column{
			{Name: "amount", FieldDescriptor: amountField},
		},
	}

	tableRegistry := map[string]*schema.Table{
		"instructions": parentTable,
		"mints":        childTable,
	}

	d, err := NewDialectRisingwave("public", tableRegistry, logger)
	require.NoError(t, err)

	sql := d.CreateTableSql["mints"]
	assert.Contains(t, sql, "CREATE TABLE IF NOT EXISTS public.mints")
	assert.Contains(t, sql, "block_number INTEGER NOT NULL")
	assert.Contains(t, sql, "block_timestamp TIMESTAMP WITH TIME ZONE NOT NULL")
	assert.Contains(t, sql, "instruction_id CHARACTER VARYING NOT NULL")
	assert.Contains(t, sql, `"amount" NUMERIC`)

	// Should not contain foreign key constraints
	assert.NotContains(t, sql, "FOREIGN KEY")
}

func TestDialectRisingwave_CreateTable_WithUniqueConstraint(t *testing.T) {
	logger := zap.NewNop()

	emailField := createMockFieldDescriptor("email", descriptor.FieldDescriptorProto_TYPE_STRING)
	nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)

	table := &schema.Table{
		Name: "users",
		Columns: []*schema.Column{
			{Name: "email", FieldDescriptor: emailField, IsUnique: true},
			{Name: "name", FieldDescriptor: nameField},
		},
	}

	tableRegistry := map[string]*schema.Table{"users": table}
	d, err := NewDialectRisingwave("public", tableRegistry, logger)
	require.NoError(t, err)

	sql := d.CreateTableSql["users"]
	assert.Contains(t, sql, `"email" CHARACTER VARYING UNIQUE`)
	assert.Contains(t, sql, `"name" CHARACTER VARYING`)
}

func TestDialectRisingwave_CreateTable_SkipsRepeatedFields(t *testing.T) {
	logger := zap.NewNop()

	tagsField := createMockFieldDescriptor("tags", descriptor.FieldDescriptorProto_TYPE_STRING)
	nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)

	table := &schema.Table{
		Name: "users",
		Columns: []*schema.Column{
			{Name: "tags", FieldDescriptor: tagsField, IsRepeated: true},
			{Name: "name", FieldDescriptor: nameField},
		},
	}

	tableRegistry := map[string]*schema.Table{"users": table}
	d, err := NewDialectRisingwave("public", tableRegistry, logger)
	require.NoError(t, err)

	sql := d.CreateTableSql["users"]
	assert.NotContains(t, sql, "tags")
	assert.Contains(t, sql, `"name" CHARACTER VARYING`)
}

func TestDialectRisingwave_CreateTable_PreventsDuplicateColumns(t *testing.T) {
	logger := zap.NewNop()

	// Create a scenario where a column might be added twice
	idField := createMockFieldDescriptor("block_number", descriptor.FieldDescriptorProto_TYPE_INT32)
	nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)

	table := &schema.Table{
		Name: "test_table",
		Columns: []*schema.Column{
			{Name: "block_number", FieldDescriptor: idField}, // This should be skipped since block_number is added automatically
			{Name: "name", FieldDescriptor: nameField},
		},
	}

	tableRegistry := map[string]*schema.Table{"test_table": table}
	d, err := NewDialectRisingwave("public", tableRegistry, logger)
	require.NoError(t, err)

	sql := d.CreateTableSql["test_table"]

	// block_number should appear only once
	assert.Equal(t, 1, strings.Count(sql, "block_number"))
	assert.Contains(t, sql, `"name" CHARACTER VARYING`)
}

func TestDialectRisingwave_CreateInsertFromDescriptor_SimpleTable(t *testing.T) {
	nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)
	ageField := createMockFieldDescriptor("age", descriptor.FieldDescriptorProto_TYPE_INT32)

	table := &schema.Table{
		Name: "users",
		Columns: []*schema.Column{
			{Name: "name", FieldDescriptor: nameField},
			{Name: "age", FieldDescriptor: ageField},
		},
	}

	d := &DialectRisingwave{schemaName: "public"}

	sql, err := createInsertFromDescriptor(table, d)
	require.NoError(t, err)

	expected := `INSERT INTO public.users (block_number, block_timestamp, "name", "age") VALUES ($1, $2, $3, $4)`
	assert.Equal(t, expected, sql)
}

func TestDialectRisingwave_CreateInsertFromDescriptor_WithPrimaryKey(t *testing.T) {
	idField := createMockFieldDescriptor("id", descriptor.FieldDescriptorProto_TYPE_STRING)
	nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING)

	table := &schema.Table{
		Name: "users",
		PrimaryKey: &schema.PrimaryKey{
			Name:            "id",
			FieldDescriptor: idField,
		},
		Columns: []*schema.Column{
			{Name: "id", FieldDescriptor: idField, IsPrimaryKey: true},
			{Name: "name", FieldDescriptor: nameField},
		},
	}

	d := &DialectRisingwave{schemaName: "public"}

	sql, err := createInsertFromDescriptor(table, d)
	require.NoError(t, err)

	expected := `INSERT INTO public.users (block_number, block_timestamp, id, "name") VALUES ($1, $2, $3, $4)`
	assert.Equal(t, expected, sql)
}

func TestDialectRisingwave_CreateInsertFromDescriptor_ChildTable(t *testing.T) {
	amountField := createMockFieldDescriptor("amount", descriptor.FieldDescriptorProto_TYPE_UINT64)

	table := &schema.Table{
		Name: "mints",
		ChildOf: &schema.ChildOf{
			ParentTable:      "instructions",
			ParentTableField: "instruction_id",
		},
		Columns: []*schema.Column{
			{Name: "amount", FieldDescriptor: amountField},
		},
	}

	d := &DialectRisingwave{schemaName: "public"}

	sql, err := createInsertFromDescriptor(table, d)
	require.NoError(t, err)

	expected := `INSERT INTO public.mints (block_number, block_timestamp, instruction_id,
"amount") VALUES ($1, $2, $3, $4)` + assert.Equal(t, expected, sql) +} + +func TestDialectRisingwave_CreateInsertFromDescriptorAcc_SimpleTable(t *testing.T) { + nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING) + ageField := createMockFieldDescriptor("age", descriptor.FieldDescriptorProto_TYPE_INT32) + + table := &schema.Table{ + Name: "users", + Columns: []*schema.Column{ + {Name: "name", FieldDescriptor: nameField}, + {Name: "age", FieldDescriptor: ageField}, + }, + } + + d := &DialectRisingwave{schemaName: "public"} + + sql, err := createInsertFromDescriptorAcc(table, d) + require.NoError(t, err) + + expected := `INSERT INTO public.users (block_number, block_timestamp, "name", "age") VALUES ` + assert.Equal(t, expected, sql) +} + +func TestDialectRisingwave_CreateInsertFromDescriptorAcc_WithPrimaryKey(t *testing.T) { + idField := createMockFieldDescriptor("id", descriptor.FieldDescriptorProto_TYPE_STRING) + nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING) + + table := &schema.Table{ + Name: "users", + PrimaryKey: &schema.PrimaryKey{ + Name: "id", + FieldDescriptor: idField, + }, + Columns: []*schema.Column{ + {Name: "id", FieldDescriptor: idField, IsPrimaryKey: true}, + {Name: "name", FieldDescriptor: nameField}, + }, + } + + d := &DialectRisingwave{schemaName: "public"} + + sql, err := createInsertFromDescriptorAcc(table, d) + require.NoError(t, err) + + expected := `INSERT INTO public.users (block_number, block_timestamp, id, "name") VALUES ` + assert.Equal(t, expected, sql) +} + +func TestDialectRisingwave_TableName(t *testing.T) { + tests := []struct { + schema string + table string + expected string + }{ + {"public", "users", "public.users"}, + {"test_schema", "orders", "test_schema.orders"}, + {"", "table", ".table"}, + } + + for _, tt := range tests { + t.Run(tt.schema+"_"+tt.table, func(t *testing.T) { + result := tableName(tt.schema, tt.table) + assert.Equal(t, 
tt.expected, result) + }) + } +} + +func TestDialectRisingwave_NoForeignKeyConstraints(t *testing.T) { + logger := zap.NewNop() + + // Create tables with foreign key relationships + userIdField := createMockFieldDescriptor("user_id", descriptor.FieldDescriptorProto_TYPE_STRING) + orderIdField := createMockFieldDescriptor("order_id", descriptor.FieldDescriptorProto_TYPE_STRING) + + userTable := &schema.Table{ + Name: "users", + PrimaryKey: &schema.PrimaryKey{ + Name: "user_id", + FieldDescriptor: userIdField, + }, + Columns: []*schema.Column{ + {Name: "user_id", FieldDescriptor: userIdField, IsPrimaryKey: true}, + }, + } + + orderTable := &schema.Table{ + Name: "orders", + PrimaryKey: &schema.PrimaryKey{ + Name: "order_id", + FieldDescriptor: orderIdField, + }, + Columns: []*schema.Column{ + {Name: "order_id", FieldDescriptor: orderIdField, IsPrimaryKey: true}, + { + Name: "user_id", + FieldDescriptor: userIdField, + ForeignKey: &schema.ForeignKey{ + Table: "users", + TableField: "user_id", + }, + }, + }, + } + + tableRegistry := map[string]*schema.Table{ + "users": userTable, + "orders": orderTable, + } + + d, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + // Should have no foreign key constraints + assert.Equal(t, 0, len(d.ForeignKeySql)) + + // But should still create the tables with the foreign key columns + orderSQL := d.CreateTableSql["orders"] + assert.Contains(t, orderSQL, `"user_id" CHARACTER VARYING`) + assert.NotContains(t, orderSQL, "FOREIGN KEY") + assert.NotContains(t, orderSQL, "REFERENCES") +} + +func TestDialectRisingwave_SchemaHashConsistency(t *testing.T) { + logger := zap.NewNop() + + nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING) + table := &schema.Table{ + Name: "users", + Columns: []*schema.Column{ + {Name: "name", FieldDescriptor: nameField}, + }, + } + + tableRegistry := map[string]*schema.Table{"users": table} + + // Create multiple dialects with same 
configuration + d1, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + d2, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + // Hashes should be identical + assert.Equal(t, d1.SchemaHash(), d2.SchemaHash()) + + // Create dialect with different table + ageField := createMockFieldDescriptor("age", descriptor.FieldDescriptorProto_TYPE_INT32) + differentTable := &schema.Table{ + Name: "users", + Columns: []*schema.Column{ + {Name: "name", FieldDescriptor: nameField}, + {Name: "age", FieldDescriptor: ageField}, + }, + } + + differentRegistry := map[string]*schema.Table{"users": differentTable} + d3, err := NewDialectRisingwave("public", differentRegistry, logger) + require.NoError(t, err) + + // Hash should be different + assert.NotEqual(t, d1.SchemaHash(), d3.SchemaHash()) +} + +func TestDialectRisingwave_TypeMapping(t *testing.T) { + tests := []struct { + name string + protoType descriptor.FieldDescriptorProto_Type + expectedSQL string + }{ + {"string", descriptor.FieldDescriptorProto_TYPE_STRING, "CHARACTER VARYING"}, + {"int32", descriptor.FieldDescriptorProto_TYPE_INT32, "INTEGER"}, + {"int64", descriptor.FieldDescriptorProto_TYPE_INT64, "BIGINT"}, + {"uint32", descriptor.FieldDescriptorProto_TYPE_UINT32, "BIGINT"}, + {"uint64", descriptor.FieldDescriptorProto_TYPE_UINT64, "NUMERIC"}, + {"float", descriptor.FieldDescriptorProto_TYPE_FLOAT, "REAL"}, + {"double", descriptor.FieldDescriptorProto_TYPE_DOUBLE, "DOUBLE PRECISION"}, + {"bool", descriptor.FieldDescriptorProto_TYPE_BOOL, "BOOLEAN"}, + {"bytes", descriptor.FieldDescriptorProto_TYPE_BYTES, "BYTEA"}, + } + + logger := zap.NewNop() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + field := createMockFieldDescriptor("test_field", tt.protoType) + + table := &schema.Table{ + Name: "test_table", + Columns: []*schema.Column{ + {Name: "test_field", FieldDescriptor: field}, + }, + } + + tableRegistry := 
map[string]*schema.Table{"test_table": table} + d, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + sql := d.CreateTableSql["test_table"] + assert.Contains(t, sql, tt.expectedSQL, "Type mapping for %s should produce %s", tt.name, tt.expectedSQL) + }) + } +} + +func TestDialectRisingwave_ComplexTableStructure(t *testing.T) { + logger := zap.NewNop() + + // Create a complex table with multiple column types and constraints + idField := createMockFieldDescriptor("id", descriptor.FieldDescriptorProto_TYPE_STRING) + nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING) + ageField := createMockFieldDescriptor("age", descriptor.FieldDescriptorProto_TYPE_INT32) + emailField := createMockFieldDescriptor("email", descriptor.FieldDescriptorProto_TYPE_STRING) + balanceField := createMockFieldDescriptor("balance", descriptor.FieldDescriptorProto_TYPE_UINT64) + activeField := createMockFieldDescriptor("active", descriptor.FieldDescriptorProto_TYPE_BOOL) + + table := &schema.Table{ + Name: "complex_users", + PrimaryKey: &schema.PrimaryKey{ + Name: "id", + FieldDescriptor: idField, + }, + Columns: []*schema.Column{ + {Name: "id", FieldDescriptor: idField, IsPrimaryKey: true}, + {Name: "name", FieldDescriptor: nameField}, + {Name: "age", FieldDescriptor: ageField}, + {Name: "email", FieldDescriptor: emailField, IsUnique: true}, + {Name: "balance", FieldDescriptor: balanceField}, + {Name: "active", FieldDescriptor: activeField}, + }, + } + + tableRegistry := map[string]*schema.Table{"complex_users": table} + d, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + sql := d.CreateTableSql["complex_users"] + + // Check all expected elements are present + assert.Contains(t, sql, "CREATE TABLE IF NOT EXISTS public.complex_users") + assert.Contains(t, sql, "id CHARACTER VARYING PRIMARY KEY") + assert.Contains(t, sql, `"name" CHARACTER VARYING`) + assert.Contains(t, sql, 
`"age" INTEGER`) + assert.Contains(t, sql, `"email" CHARACTER VARYING UNIQUE`) + assert.Contains(t, sql, `"balance" NUMERIC`) + assert.Contains(t, sql, `"active" BOOLEAN`) + assert.Contains(t, sql, "block_number INTEGER NOT NULL") + assert.Contains(t, sql, "block_timestamp TIMESTAMP WITH TIME ZONE NOT NULL") + + // Ensure no foreign keys + assert.NotContains(t, sql, "FOREIGN KEY") + assert.NotContains(t, sql, "REFERENCES") +} + +func TestDialectRisingwave_MultipleChildTables(t *testing.T) { + logger := zap.NewNop() + + // Parent table + parentIdField := createMockFieldDescriptor("transaction_id", descriptor.FieldDescriptorProto_TYPE_STRING) + parentTable := &schema.Table{ + Name: "transactions", + PrimaryKey: &schema.PrimaryKey{ + Name: "transaction_id", + FieldDescriptor: parentIdField, + }, + Columns: []*schema.Column{ + {Name: "transaction_id", FieldDescriptor: parentIdField, IsPrimaryKey: true}, + }, + } + + // First child table + transferAmountField := createMockFieldDescriptor("amount", descriptor.FieldDescriptorProto_TYPE_UINT64) + transferTable := &schema.Table{ + Name: "transfers", + ChildOf: &schema.ChildOf{ + ParentTable: "transactions", + ParentTableField: "transaction_id", + }, + Columns: []*schema.Column{ + {Name: "amount", FieldDescriptor: transferAmountField}, + }, + } + + // Second child table + logMessageField := createMockFieldDescriptor("message", descriptor.FieldDescriptorProto_TYPE_STRING) + logTable := &schema.Table{ + Name: "logs", + ChildOf: &schema.ChildOf{ + ParentTable: "transactions", + ParentTableField: "transaction_id", + }, + Columns: []*schema.Column{ + {Name: "message", FieldDescriptor: logMessageField}, + }, + } + + tableRegistry := map[string]*schema.Table{ + "transactions": parentTable, + "transfers": transferTable, + "logs": logTable, + } + + d, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + // Check parent table + parentSQL := d.CreateTableSql["transactions"] + assert.Contains(t, 
parentSQL, "transaction_id CHARACTER VARYING PRIMARY KEY") + + // Check first child table + transferSQL := d.CreateTableSql["transfers"] + assert.Contains(t, transferSQL, "transaction_id CHARACTER VARYING NOT NULL") + assert.Contains(t, transferSQL, `"amount" NUMERIC`) + + // Check second child table + logSQL := d.CreateTableSql["logs"] + assert.Contains(t, logSQL, "transaction_id CHARACTER VARYING NOT NULL") + assert.Contains(t, logSQL, `"message" CHARACTER VARYING`) + + // All should have block metadata + for _, sql := range []string{parentSQL, transferSQL, logSQL} { + assert.Contains(t, sql, "block_number INTEGER NOT NULL") + assert.Contains(t, sql, "block_timestamp TIMESTAMP WITH TIME ZONE NOT NULL") + } +} + +func TestDialectRisingwave_EmptyTableRegistry(t *testing.T) { + logger := zap.NewNop() + + d, err := NewDialectRisingwave("test_schema", map[string]*schema.Table{}, logger) + require.NoError(t, err) + + // Should have no CREATE TABLE statements + assert.Equal(t, 0, len(d.CreateTableSql)) + + // Should have no constraints + assert.Equal(t, 0, len(d.PrimaryKeySql)) + assert.Equal(t, 0, len(d.ForeignKeySql)) + assert.Equal(t, 0, len(d.UniqueConstraintSql)) + + // But should still have a valid schema hash + assert.NotEmpty(t, d.SchemaHash()) +} + +func TestDialectRisingwave_GetTable(t *testing.T) { + logger := zap.NewNop() + + nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING) + table := &schema.Table{ + Name: "users", + Columns: []*schema.Column{ + {Name: "name", FieldDescriptor: nameField}, + }, + } + + tableRegistry := map[string]*schema.Table{"users": table} + d, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + // Should be able to retrieve the table + retrievedTable := d.GetTable("users") + assert.NotNil(t, retrievedTable) + assert.Equal(t, "users", retrievedTable.Name) + + // Should return nil for non-existent table + nonExistentTable := d.GetTable("non_existent") + 
assert.Nil(t, nonExistentTable) +} + +func TestDialectRisingwave_GetTables(t *testing.T) { + logger := zap.NewNop() + + nameField := createMockFieldDescriptor("name", descriptor.FieldDescriptorProto_TYPE_STRING) + ageField := createMockFieldDescriptor("age", descriptor.FieldDescriptorProto_TYPE_INT32) + + usersTable := &schema.Table{ + Name: "users", + Columns: []*schema.Column{ + {Name: "name", FieldDescriptor: nameField}, + }, + } + + ordersTable := &schema.Table{ + Name: "orders", + Columns: []*schema.Column{ + {Name: "age", FieldDescriptor: ageField}, + }, + } + + tableRegistry := map[string]*schema.Table{ + "users": usersTable, + "orders": ordersTable, + } + + d, err := NewDialectRisingwave("public", tableRegistry, logger) + require.NoError(t, err) + + tables := d.GetTables() + assert.Equal(t, 2, len(tables)) + + // Check that both tables are present (order doesn't matter) + tableNames := make([]string, len(tables)) + for i, table := range tables { + tableNames[i] = table.Name + } + assert.Contains(t, tableNames, "users") + assert.Contains(t, tableNames, "orders") +} + +// Helper function to create mock field descriptors +func createMockFieldDescriptor(name string, fieldType descriptor.FieldDescriptorProto_Type) *desc.FieldDescriptor { + // Create a minimal field descriptor for testing + // In real usage, these would come from protobuf reflection + fieldNumber := int32(1) // Valid field number (must be > 0) + proto := &descriptor.FieldDescriptorProto{ + Name: &name, + Type: &fieldType, + Number: &fieldNumber, + } + + // Create a mock message descriptor + msgProto := &descriptor.DescriptorProto{ + Name: stringPtr("TestMessage"), + Field: []*descriptor.FieldDescriptorProto{proto}, + } + + // Create file descriptor + fileProto := &descriptor.FileDescriptorProto{ + Name: stringPtr("test.proto"), + MessageType: []*descriptor.DescriptorProto{msgProto}, + } + + // Build descriptors + fileDesc, err := desc.CreateFileDescriptor(fileProto) + if err != nil { + panic(err) 
+ } + + msgDesc := fileDesc.GetMessageTypes()[0] + fieldDesc := msgDesc.GetFields()[0] + + return fieldDesc +} + +func stringPtr(s string) *string { + return &s +} + diff --git a/db_proto/sql/risingwave/inserter.go b/db_proto/sql/risingwave/inserter.go new file mode 100644 index 0000000..f007735 --- /dev/null +++ b/db_proto/sql/risingwave/inserter.go @@ -0,0 +1,9 @@ +package risingwave + +type pgInserter interface { + insert(table string, values []any, database *Database) error +} + +type pgFlusher interface { + flush(database *Database) error +} \ No newline at end of file diff --git a/db_proto/sql/risingwave/row_inserter.go b/db_proto/sql/risingwave/row_inserter.go new file mode 100644 index 0000000..75709cb --- /dev/null +++ b/db_proto/sql/risingwave/row_inserter.go @@ -0,0 +1,157 @@ +package risingwave + +import ( + "database/sql" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" + + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "github.com/streamingfast/substreams-sink-sql/db_proto/sql/schema" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type RowInserter struct { + insertQueries map[string]string + insertStatements map[string]*sql.Stmt + logger *zap.Logger +} + +func NewRowInserter(logger *zap.Logger) (*RowInserter, error) { + logger = logger.Named("risingwave inserter") + + return &RowInserter{ + logger: logger, + }, nil +} + +func (i *RowInserter) init(database *Database) error { + tables := database.dialect.GetTables() + insertStatements := map[string]*sql.Stmt{} + insertQueries := map[string]string{} + + for _, table := range tables { + query, err := createInsertFromDescriptor(table, database.dialect) + if err != nil { + return fmt.Errorf("creating insert from descriptor for table %q: %w", table.Name, err) + } + insertQueries[table.Name] = query + + stmt, err := database.db.Prepare(query) + if err != nil { + return fmt.Errorf("preparing statement %q: %w", query, err) + } + 
insertStatements[table.Name] = stmt + } + + // RisingWave supports RETURNING clause as of release #7094 + insertQueries["_blocks_"] = fmt.Sprintf("INSERT INTO %s (number, hash, timestamp) VALUES ($1, $2, $3) RETURNING number", tableName(database.schema.Name, "_blocks_")) + bs, err := database.db.Prepare(insertQueries["_blocks_"]) + if err != nil { + return fmt.Errorf("preparing statement %q: %w", insertQueries["_blocks_"], err) + } + insertStatements["_blocks_"] = bs + + // RisingWave doesn't support PostgreSQL's ON CONFLICT syntax in INSERT statements + // The _cursor_ table is created with ON CONFLICT OVERWRITE to handle this automatically + insertQueries["_cursor_"] = fmt.Sprintf("INSERT INTO %s (name, cursor) VALUES ($1, $2)", tableName(database.schema.Name, "_cursor_")) + cs, err := database.db.Prepare(insertQueries["_cursor_"]) + if err != nil { + return fmt.Errorf("preparing statement %q: %w", insertQueries["_cursor_"], err) + } + insertStatements["_cursor_"] = cs + + i.insertStatements = insertStatements + i.insertQueries = insertQueries + + return nil +} + +func createInsertFromDescriptor(table *schema.Table, dialect sql2.Dialect) (string, error) { + tableName := dialect.FullTableName(table) + fields := table.Columns + + var fieldNames []string + var placeholders []string + + fieldCount := 0 + returningField := "" + if table.PrimaryKey != nil { + returningField = table.PrimaryKey.Name + } + + // Add block_number + fieldCount++ + fieldNames = append(fieldNames, "block_number") + placeholders = append(placeholders, fmt.Sprintf("$%d", fieldCount)) + + // Add block_timestamp + fieldCount++ + fieldNames = append(fieldNames, "block_timestamp") + placeholders = append(placeholders, fmt.Sprintf("$%d", fieldCount)) + + if pk := table.PrimaryKey; pk != nil { + fieldCount++ + returningField = pk.Name + fieldNames = append(fieldNames, pk.Name) + placeholders = append(placeholders, fmt.Sprintf("$%d", fieldCount)) + } + + if table.ChildOf != nil { + fieldCount++ + 
fieldNames = append(fieldNames, table.ChildOf.ParentTableField) + placeholders = append(placeholders, fmt.Sprintf("$%d", fieldCount)) + } + + for _, field := range fields { + if field.Name == returningField { + continue + } + if field.IsRepeated || field.IsExtension { + continue + } + fieldCount++ + fieldNames = append(fieldNames, field.QuotedName()) + placeholders = append(placeholders, fmt.Sprintf("$%d", fieldCount)) + } + + return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", + tableName, + strings.Join(fieldNames, ", "), + strings.Join(placeholders, ", "), + ), nil +} + +func (i *RowInserter) insert(table string, values []any, database *Database) error { + i.logger.Debug("inserting row", zap.String("table", table), zap.Any("values", values)) + stmt := i.insertStatements[table] + stmt = database.wrapInsertStatement(stmt) + + for i, value := range values { + switch v := value.(type) { + case uint64: + values[i] = strconv.FormatUint(v, 10) + case []uint8: + values[i] = base64.StdEncoding.EncodeToString(v) + case *timestamppb.Timestamp: + values[i] = "'" + v.AsTime().Format(time.RFC3339) + "'" + } + } + + _, err := stmt.Exec(values...) 
+ if err != nil { + insert := i.insertQueries[table] + return fmt.Errorf("risingwave row inserter: querying insert %q: %w", insert, err) + } + + return nil +} + +func (i *RowInserter) flush(database *Database) error { + return nil +} + + diff --git a/db_proto/sql/risingwave/types.go b/db_proto/sql/risingwave/types.go new file mode 100644 index 0000000..5fd171e --- /dev/null +++ b/db_proto/sql/risingwave/types.go @@ -0,0 +1,392 @@ +package risingwave + +import ( + "encoding/hex" + "fmt" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/jhump/protoreflect/desc" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "github.com/streamingfast/substreams-sink-sql/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type DataType string + +const ( + // Numeric types - aligning with RisingWave documentation + TypeSmallInt DataType = "SMALLINT" // Two-byte integer + TypeInteger DataType = "INTEGER" // Four-byte integer + TypeBigInt DataType = "BIGINT" // Eight-byte integer + TypeNumeric DataType = "NUMERIC" // Exact numeric (28 decimal digits precision) + TypeReal DataType = "REAL" // Single precision floating-point (4 bytes) + TypeDouble DataType = "DOUBLE PRECISION" // Double precision floating-point (8 bytes) + + // Boolean type + TypeBool DataType = "BOOLEAN" // Logical Boolean (true, false, or null) + + // String types + TypeVarchar DataType = "CHARACTER VARYING" // Variable-length character string + TypeText DataType = "CHARACTER VARYING" // Use CHARACTER VARYING for RisingWave + + // Binary type + TypeBytea DataType = "BYTEA" // Binary strings (hex format) + + // Date and time types + TypeDate DataType = "DATE" // Calendar date (year, month, day) + TypeTime DataType = "TIME" // Time of day (no time zone) + TypeTimestamp DataType = "TIMESTAMP" // Date and time (no time zone) + TypeTimestamptz DataType = "TIMESTAMP WITH TIME ZONE" // Timestamp with time zone + TypeInterval DataType = 
"INTERVAL" // Time span + + // Complex types + TypeJsonb DataType = "JSONB" // Binary JSON value + + // RisingWave-specific semantic types + TypeRwInt256 DataType = "rw_int256" // RisingWave's 256-bit signed integer type + TypeRwUint256 DataType = "rw_uint256" // RisingWave's 256-bit unsigned integer type +) + +func (s DataType) String() string { + return string(s) +} + +func IsWellKnownType(fd *desc.FieldDescriptor) bool { + switch fd.GetMessageType().GetFullyQualifiedName() { + case "google.protobuf.Timestamp": + return true + default: + return false + } +} + +// MapSemanticType maps semantic types to RisingWave-specific SQL types +func MapSemanticType(semanticType sql2.SemanticType) (string, bool) { + switch semanticType { + case sql2.SemanticUint256: + return string(TypeRwUint256), true // Use new rw_uint256 for unsigned + case sql2.SemanticInt256: + return string(TypeRwInt256), true // Keep rw_int256 for signed + case sql2.SemanticAddress: + return "CHARACTER VARYING", true + case sql2.SemanticHash: + return "CHARACTER VARYING", true + case sql2.SemanticSignature: + return "CHARACTER VARYING", true + case sql2.SemanticPubkey: + return "CHARACTER VARYING", true + case sql2.SemanticHex: + return "CHARACTER VARYING", true + case sql2.SemanticBase64: + return "CHARACTER VARYING", true + case sql2.SemanticJSON: + return string(TypeJsonb), true + case sql2.SemanticUUID: + return "CHARACTER VARYING", true // RisingWave converts UUID to CHARACTER VARYING + case sql2.SemanticUnixTimestamp, sql2.SemanticUnixTimestampMS, sql2.SemanticBlockTimestamp: + return string(TypeTimestamptz), true + default: + return "", false // Not supported + } +} + +// SupportsSemanticType returns true if RisingWave supports the semantic type +func SupportsSemanticType(semanticType sql2.SemanticType) bool { + _, supported := MapSemanticType(semanticType) + return supported +} + +func MapFieldType(fd *desc.FieldDescriptor) DataType { + // Check for semantic type annotation first + semanticType, 
_, hasSemanticType := proto.SemanticTypeInfo(fd) + if hasSemanticType { + if sqlType, supported := MapSemanticType(sql2.SemanticType(semanticType)); supported { + return DataType(sqlType) + } + // Fall through to default mapping if semantic type not supported + } + + // Default protobuf type mapping + t := fd.GetType() + switch t { + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + switch fd.GetMessageType().GetFullyQualifiedName() { + case "google.protobuf.Timestamp": + return TypeTimestamptz // Use timestamptz for protobuf timestamps + default: + panic(fmt.Sprintf("Message type not supported: %s", fd.GetMessageType().GetFullyQualifiedName())) + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + return TypeBool + case descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_SINT32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: + return TypeInteger + case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_SINT64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: + return TypeBigInt + case descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_FIXED64: + return TypeNumeric // Use NUMERIC for large unsigned integers + case descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_FIXED32: + return TypeBigInt // Use BIGINT for 32-bit unsigned (to avoid overflow) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + return TypeReal // Use REAL for single precision + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + return TypeDouble + case descriptor.FieldDescriptorProto_TYPE_STRING: + return TypeVarchar + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return TypeBytea // Use BYTEA for binary data + case descriptor.FieldDescriptorProto_TYPE_ENUM: + return TypeVarchar // Store enums as varchar + default: + panic(fmt.Sprintf("unsupported type: %s", t)) + } +} + +func ValueToString(value any) (s string) { + switch v := value.(type) { + case string: + s = "'" 
+ strings.ReplaceAll(strings.ReplaceAll(v, "'", "''"), "\\", "\\\\") + "'" + case int64: + s = strconv.FormatInt(v, 10) + case int32: + s = strconv.FormatInt(int64(v), 10) + case int: + s = strconv.FormatInt(int64(v), 10) + case uint64: + // For large unsigned integers, use numeric literal + s = strconv.FormatUint(v, 10) + case uint32: + s = strconv.FormatUint(uint64(v), 10) + case uint: + s = strconv.FormatUint(uint64(v), 10) + case float64: + s = strconv.FormatFloat(v, 'f', -1, 64) + case float32: + s = strconv.FormatFloat(float64(v), 'f', -1, 32) + case []uint8: + // RisingWave expects hex format for bytea: '\x...' + s = "'\\x" + strings.ToUpper(fmt.Sprintf("%x", v)) + "'" + case bool: + s = strconv.FormatBool(v) + case time.Time: + // Use RFC3339 format for timestamps + s = "'" + v.Format(time.RFC3339) + "'" + case *timestamppb.Timestamp: + // Convert protobuf timestamp to timestamptz format + s = "'" + v.AsTime().Format(time.RFC3339) + "'" + default: + panic(fmt.Sprintf("unsupported type: %T", v)) + } + return +} + +// ConvertSemanticValue converts a value according to semantic type and format hint for RisingWave +func ConvertSemanticValue(semanticType sql2.SemanticType, value interface{}, formatHint string) (string, error) { + switch semanticType { + case sql2.SemanticUint256: + return convertToRwUint256(value, formatHint) + case sql2.SemanticInt256: + return convertToRwInt256(value, formatHint) + case sql2.SemanticAddress: + return convertToAddress(value) + case sql2.SemanticHash: + return convertToHash(value) + case sql2.SemanticSignature, sql2.SemanticPubkey, sql2.SemanticHex: + return convertToHexString(value) + case sql2.SemanticJSON: + return convertToJSON(value) + case sql2.SemanticUUID: + return convertToUUID(value) + case sql2.SemanticUnixTimestamp: + return convertUnixTimestamp(value, false) + case sql2.SemanticUnixTimestampMS: + return convertUnixTimestamp(value, true) + case sql2.SemanticBlockTimestamp: + return convertUnixTimestamp(value, false) 
+ default: + // Fallback to default value conversion + return ValueToString(value), nil + } +} + +// convertToRwUint256 converts values to RisingWave's rw_uint256 type +func convertToRwUint256(value interface{}, formatHint string) (string, error) { + switch v := value.(type) { + case string: + // Handle hex strings (0x...) + if strings.HasPrefix(v, "0x") { + return "'" + v + "'::rw_uint256", nil + } + // Handle decimal strings + if formatHint == "hex" && !strings.HasPrefix(v, "0x") { + // Add 0x prefix if missing for hex format + return "'0x" + v + "'::rw_uint256", nil + } + return "'" + v + "'::rw_uint256", nil + case []byte: + // Convert bytes to hex for rw_uint256 + hexStr := "0x" + hex.EncodeToString(v) + return "'" + hexStr + "'::rw_uint256", nil + case int64, uint64, int32, uint32: + // Convert numeric types to string + return "'" + fmt.Sprintf("%v", v) + "'::rw_uint256", nil + default: + return "", fmt.Errorf("cannot convert %T to rw_uint256", value) + } +} + +// convertToRwInt256 converts values to RisingWave's rw_int256 type +func convertToRwInt256(value interface{}, formatHint string) (string, error) { + switch v := value.(type) { + case string: + // Handle hex strings (0x...) 
+ if strings.HasPrefix(v, "0x") { + return "'" + v + "'::rw_int256", nil + } + // Handle decimal strings + if formatHint == "hex" && !strings.HasPrefix(v, "0x") { + // Add 0x prefix if missing for hex format + return "'0x" + v + "'::rw_int256", nil + } + return "'" + v + "'::rw_int256", nil + case []byte: + // Convert bytes to hex for rw_int256 + hexStr := "0x" + hex.EncodeToString(v) + return "'" + hexStr + "'::rw_int256", nil + case int64, uint64, int32, uint32: + // Convert numeric types to string + return "'" + fmt.Sprintf("%v", v) + "'::rw_int256", nil + default: + return "", fmt.Errorf("cannot convert %T to rw_int256", value) + } +} + +// convertToAddress converts values to blockchain address format +func convertToAddress(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Validate address format + if strings.HasPrefix(v, "0x") { + // Has 0x prefix - validate hex part is exactly 40 chars + hexPart := v[2:] + if len(hexPart) == 40 { + return "'" + v + "'", nil + } + return "", fmt.Errorf("invalid address format: %s (expected 40 hex chars after 0x)", v) + } + // No 0x prefix - should be exactly 40 hex chars + if len(v) == 40 { + return "'0x" + v + "'", nil + } + return "", fmt.Errorf("invalid address format: %s (expected 40 or 42 chars)", v) + case []byte: + if len(v) == 20 { + return "'0x" + hex.EncodeToString(v) + "'", nil + } + return "", fmt.Errorf("invalid address byte length: %d (expected 20)", len(v)) + default: + return "", fmt.Errorf("cannot convert %T to address", value) + } +} + +// convertToHash converts values to hash format +func convertToHash(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Validate hash format + if len(v) == 66 && strings.HasPrefix(v, "0x") { + return "'" + v + "'", nil + } + if len(v) == 64 { + // Add 0x prefix if missing + return "'0x" + v + "'", nil + } + return "", fmt.Errorf("invalid hash format: %s (expected 64 or 66 chars)", v) + case []byte: + if 
len(v) == 32 { + return "'0x" + hex.EncodeToString(v) + "'", nil + } + return "", fmt.Errorf("invalid hash byte length: %d (expected 32)", len(v)) + default: + return "", fmt.Errorf("cannot convert %T to hash", value) + } +} + +// convertToHexString converts values to hex string format +func convertToHexString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + if strings.HasPrefix(v, "0x") { + return "'" + v + "'", nil + } + // Add 0x prefix if missing + return "'0x" + v + "'", nil + case []byte: + return "'0x" + hex.EncodeToString(v) + "'", nil + default: + return "", fmt.Errorf("cannot convert %T to hex string", value) + } +} + + +// convertToJSON converts values to JSONB format +func convertToJSON(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Assume string is already valid JSON + return "'" + strings.ReplaceAll(v, "'", "''") + "'::jsonb", nil + default: + return "", fmt.Errorf("cannot convert %T to JSON", value) + } +} + +// convertToUUID converts values to UUID format +func convertToUUID(value interface{}) (string, error) { + switch v := value.(type) { + case string: + // Basic UUID validation (length check) + if len(v) == 36 { + return "'" + v + "'", nil + } + return "", fmt.Errorf("invalid UUID format: %s (expected 36 chars)", v) + default: + return "", fmt.Errorf("cannot convert %T to UUID", value) + } +} + +// convertUnixTimestamp converts unix timestamps to RisingWave timestamp format +func convertUnixTimestamp(value interface{}, isMilliseconds bool) (string, error) { + var t time.Time + + switch v := value.(type) { + case int64: + if isMilliseconds { + t = time.Unix(v/1000, (v%1000)*1000000) + } else { + t = time.Unix(v, 0) + } + case uint64: + if isMilliseconds { + t = time.Unix(int64(v/1000), int64((v%1000)*1000000)) + } else { + t = time.Unix(int64(v), 0) + } + case string: + // Try to parse as number + if val, err := strconv.ParseInt(v, 10, 64); err == nil { + if isMilliseconds { 
+ t = time.Unix(val/1000, (val%1000)*1000000) + } else { + t = time.Unix(val, 0) + } + } else { + return "", fmt.Errorf("cannot parse timestamp string: %s", v) + } + default: + return "", fmt.Errorf("cannot convert %T to timestamp", value) + } + + return "'" + t.UTC().Format(time.RFC3339) + "'", nil +} diff --git a/db_proto/sql/risingwave/types_test.go b/db_proto/sql/risingwave/types_test.go new file mode 100644 index 0000000..fa986b7 --- /dev/null +++ b/db_proto/sql/risingwave/types_test.go @@ -0,0 +1,436 @@ +package risingwave + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + sql2 "github.com/streamingfast/substreams-sink-sql/db_proto/sql" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestValueToString(t *testing.T) { + tests := []struct { + name string + input interface{} + expected string + }{ + // String values + {"simple string", "hello", "'hello'"}, + {"string with quotes", "hello'world", "'hello''world'"}, + {"string with backslash", "hello\\world", "'hello\\\\world'"}, + {"empty string", "", "''"}, + + // Integer values + {"int64", int64(123), "123"}, + {"int64 negative", int64(-456), "-456"}, + {"int32", int32(456), "456"}, + {"int", int(789), "789"}, + + // Unsigned integer values + {"uint64", uint64(123), "123"}, + {"uint32", uint32(456), "456"}, + {"uint", uint(789), "789"}, + + // Float values + {"float64", float64(123.45), "123.45"}, + {"float32", float32(67.89), "67.89"}, + + // Boolean values + {"bool true", true, "true"}, + {"bool false", false, "false"}, + + // Byte slice (should be hex encoded with uppercase) + {"bytes", []uint8{0xDE, 0xAD, 0xBE, 0xEF}, "'\\xDEADBEEF'"}, + {"empty bytes", []uint8{}, "'\\x'"}, + + // Time values + {"time", time.Date(2023, 1, 15, 10, 30, 0, 0, time.UTC), "'2023-01-15T10:30:00Z'"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ValueToString(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func 
TestValueToStringTimestamp(t *testing.T) { + // Test protobuf timestamp + testTime := time.Date(2023, 1, 15, 10, 30, 0, 0, time.UTC) + pbTime := timestamppb.New(testTime) + result := ValueToString(pbTime) + assert.Equal(t, "'2023-01-15T10:30:00Z'", result) +} + +func TestValueToStringPanic(t *testing.T) { + // Test unsupported type should panic + assert.Panics(t, func() { + ValueToString(complex64(1 + 2i)) + }) +} + +func TestDataTypeString(t *testing.T) { + tests := []struct { + dataType DataType + expected string + }{ + {TypeSmallInt, "SMALLINT"}, + {TypeInteger, "INTEGER"}, + {TypeBigInt, "BIGINT"}, + {TypeNumeric, "NUMERIC"}, + {TypeReal, "REAL"}, + {TypeDouble, "DOUBLE PRECISION"}, + {TypeBool, "BOOLEAN"}, + {TypeVarchar, "CHARACTER VARYING"}, + {TypeText, "CHARACTER VARYING"}, + {TypeBytea, "BYTEA"}, + {TypeDate, "DATE"}, + {TypeTime, "TIME"}, + {TypeTimestamp, "TIMESTAMP"}, + {TypeTimestamptz, "TIMESTAMP WITH TIME ZONE"}, + {TypeInterval, "INTERVAL"}, + {TypeJsonb, "JSONB"}, + {TypeRwInt256, "rw_int256"}, + {TypeRwUint256, "rw_uint256"}, + } + + for _, tt := range tests { + t.Run(string(tt.dataType), func(t *testing.T) { + assert.Equal(t, tt.expected, tt.dataType.String()) + }) + } +} + +func TestMapSemanticType(t *testing.T) { + tests := []struct { + name string + semanticType sql2.SemanticType + expectedSQL string + shouldSupport bool + }{ + { + name: "uint256 maps to rw_uint256", + semanticType: sql2.SemanticUint256, + expectedSQL: "rw_uint256", + shouldSupport: true, + }, + { + name: "int256 maps to rw_int256", + semanticType: sql2.SemanticInt256, + expectedSQL: "rw_int256", + shouldSupport: true, + }, + { + name: "address maps to CHARACTER VARYING", + semanticType: sql2.SemanticAddress, + expectedSQL: "CHARACTER VARYING", + shouldSupport: true, + }, + { + name: "hash maps to CHARACTER VARYING", + semanticType: sql2.SemanticHash, + expectedSQL: "CHARACTER VARYING", + shouldSupport: true, + }, + { + name: "json maps to JSONB", + semanticType: 
sql2.SemanticJSON, + expectedSQL: "JSONB", + shouldSupport: true, + }, + { + name: "unix_timestamp maps to TIMESTAMP WITH TIME ZONE", + semanticType: sql2.SemanticUnixTimestamp, + expectedSQL: "TIMESTAMP WITH TIME ZONE", + shouldSupport: true, + }, + { + name: "unsupported type", + semanticType: sql2.SemanticType("unsupported"), + expectedSQL: "", + shouldSupport: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sqlType, supported := MapSemanticType(tt.semanticType) + + assert.Equal(t, tt.shouldSupport, supported, "MapSemanticType() supported") + assert.Equal(t, tt.expectedSQL, sqlType, "MapSemanticType() sqlType") + + // Test SupportsSemanticType consistency + assert.Equal(t, tt.shouldSupport, SupportsSemanticType(tt.semanticType), "SupportsSemanticType() consistency") + }) + } +} + +func TestConvertToRwUint256(t *testing.T) { + tests := []struct { + name string + value interface{} + formatHint string + expected string + shouldError bool + }{ + { + name: "hex string with 0x prefix", + value: "0x1234567890abcdef", + formatHint: "hex", + expected: "'0x1234567890abcdef'::rw_uint256", + shouldError: false, + }, + { + name: "hex string without 0x prefix with hex hint", + value: "1234567890abcdef", + formatHint: "hex", + expected: "'0x1234567890abcdef'::rw_uint256", + shouldError: false, + }, + { + name: "decimal string", + value: "123456789012345678901234567890", + formatHint: "decimal", + expected: "'123456789012345678901234567890'::rw_uint256", + shouldError: false, + }, + { + name: "byte array", + value: []byte{0x12, 0x34, 0x56, 0x78}, + formatHint: "", + expected: "'0x12345678'::rw_uint256", + shouldError: false, + }, + { + name: "uint64 value", + value: uint64(12345), + formatHint: "", + expected: "'12345'::rw_uint256", + shouldError: false, + }, + { + name: "unsupported type", + value: float64(123.45), + formatHint: "", + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + result, err := convertToRwUint256(tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "convertToRwUint256() should error") + return + } + + assert.NoError(t, err, "convertToRwUint256() should not error") + assert.Equal(t, tt.expected, result, "convertToRwUint256() result") + }) + } +} + +func TestConvertToRwInt256(t *testing.T) { + tests := []struct { + name string + value interface{} + formatHint string + expected string + shouldError bool + }{ + { + name: "hex string with 0x prefix", + value: "0x1234567890abcdef", + formatHint: "hex", + expected: "'0x1234567890abcdef'::rw_int256", + shouldError: false, + }, + { + name: "hex string without 0x prefix with hex hint", + value: "1234567890abcdef", + formatHint: "hex", + expected: "'0x1234567890abcdef'::rw_int256", + shouldError: false, + }, + { + name: "decimal string", + value: "123456789012345678901234567890", + formatHint: "decimal", + expected: "'123456789012345678901234567890'::rw_int256", + shouldError: false, + }, + { + name: "byte array", + value: []byte{0x12, 0x34, 0x56, 0x78}, + formatHint: "", + expected: "'0x12345678'::rw_int256", + shouldError: false, + }, + { + name: "int64 value", + value: int64(12345), + formatHint: "", + expected: "'12345'::rw_int256", + shouldError: false, + }, + { + name: "uint64 value", + value: uint64(12345), + formatHint: "", + expected: "'12345'::rw_int256", + shouldError: false, + }, + { + name: "unsupported type", + value: float64(123.45), + formatHint: "", + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToRwInt256(tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "convertToRwInt256() should error") + return + } + + assert.NoError(t, err, "convertToRwInt256() should not error") + assert.Equal(t, tt.expected, result, "convertToRwInt256() result") + }) + } +} + +func TestConvertToAddress(t *testing.T) { + tests := []struct { + name 
string + value interface{} + expected string + shouldError bool + }{ + { + name: "valid address with 0x prefix", + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + expected: "'0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e'", + shouldError: false, + }, + { + name: "valid address without 0x prefix", + value: "742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + expected: "'0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e'", + shouldError: false, + }, + { + name: "20-byte array", + value: []byte{0x74, 0x2d, 0x35, 0xcc, 0x66, 0x36, 0xc0, 0x53, 0x29, 0x25, 0xa3, 0xb8, 0xd0, 0xa3, 0xe5, 0xa5, 0xf2, 0xd5, 0xde, 0x8e}, + expected: "'0x742d35cc6636c0532925a3b8d0a3e5a5f2d5de8e'", + shouldError: false, + }, + { + name: "invalid address length", + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De", + expected: "", + shouldError: true, + }, + { + name: "invalid byte array length", + value: []byte{0x74, 0x2d, 0x35}, + expected: "", + shouldError: true, + }, + { + name: "unsupported type", + value: 123, + expected: "", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertToAddress(tt.value) + + if tt.shouldError { + assert.Error(t, err, "convertToAddress() should error") + return + } + + assert.NoError(t, err, "convertToAddress() should not error") + assert.Equal(t, tt.expected, result, "convertToAddress() result") + }) + } +} + +func TestConvertSemanticValue(t *testing.T) { + tests := []struct { + name string + semanticType sql2.SemanticType + value interface{} + formatHint string + shouldError bool + }{ + { + name: "uint256 conversion", + semanticType: sql2.SemanticUint256, + value: "0x123456789", + formatHint: "hex", + shouldError: false, + }, + { + name: "address conversion", + semanticType: sql2.SemanticAddress, + value: "0x742d35cc6636C0532925a3b8D0A3e5A5F2d5De8e", + formatHint: "", + shouldError: false, + }, + { + name: "json conversion", + semanticType: sql2.SemanticJSON, + value: `{"key": "value"}`, + formatHint: 
"", + shouldError: false, + }, + { + name: "uuid conversion", + semanticType: sql2.SemanticUUID, + value: "550e8400-e29b-41d4-a716-446655440000", + formatHint: "", + shouldError: false, + }, + { + name: "unix timestamp conversion", + semanticType: sql2.SemanticUnixTimestamp, + value: int64(1640995200), + formatHint: "", + shouldError: false, + }, + { + name: "fallback to default conversion", + semanticType: sql2.SemanticType("unknown"), + value: "test", + formatHint: "", + shouldError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := ConvertSemanticValue(tt.semanticType, tt.value, tt.formatHint) + + if tt.shouldError { + assert.Error(t, err, "ConvertSemanticValue() should error") + return + } + + assert.NoError(t, err, "ConvertSemanticValue() should not error") + assert.NotEmpty(t, result, "ConvertSemanticValue() should return non-empty result") + }) + } +} diff --git a/db_proto/sql/schema/schema.go b/db_proto/sql/schema/schema.go index 3148716..6c79869 100644 --- a/db_proto/sql/schema/schema.go +++ b/db_proto/sql/schema/schema.go @@ -5,7 +5,7 @@ import ( "github.com/golang/protobuf/protoc-gen-go/descriptor" "github.com/jhump/protoreflect/desc" - "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1" + pbSchema "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1" "github.com/streamingfast/substreams-sink-sql/proto" "go.uber.org/zap" ) @@ -55,7 +55,7 @@ func (s *Schema) init(rootMessageDescriptor *desc.MessageDescriptor) error { if s.withProtoOption { return nil } - tableInfo = &schema.Table{ + tableInfo = &pbSchema.Table{ Name: md.GetName(), ChildOf: nil, } diff --git a/db_proto/sql/schema/table.go b/db_proto/sql/schema/table.go index 09fd4ca..8181441 100644 --- a/db_proto/sql/schema/table.go +++ b/db_proto/sql/schema/table.go @@ -33,17 +33,19 @@ func NewChildOf(childOf string) (*ChildOf, error) { } type Table struct { - Name string - PrimaryKey 
*PrimaryKey - ChildOf *ChildOf - Columns []*Column - Ordinal int + Name string + PrimaryKey *PrimaryKey + ChildOf *ChildOf + Columns []*Column + Ordinal int + PbTableInfo *pbSchmema.Table } func NewTable(descriptor *desc.MessageDescriptor, tableInfo *pbSchmema.Table, ordinal int) (*Table, error) { table := &Table{ - Name: descriptor.GetName(), - Ordinal: ordinal, + Name: descriptor.GetName(), + Ordinal: ordinal, + PbTableInfo: tableInfo, } table.Name = tableInfo.Name @@ -81,7 +83,9 @@ func (t *Table) processColumns(descriptor *desc.MessageDescriptor) error { } if fieldDescriptor.IsRepeated() { - continue + if fieldDescriptor.GetType() == descriptor2.FieldDescriptorProto_TYPE_MESSAGE { //This will be handled by table relations + continue + } } if fieldDescriptor.GetType() == descriptor2.FieldDescriptorProto_TYPE_MESSAGE { @@ -99,7 +103,7 @@ func (t *Table) processColumns(descriptor *desc.MessageDescriptor) error { if column.IsPrimaryKey { if t.PrimaryKey != nil { - return fmt.Errorf("multiple primary keys are not supported in message") + return fmt.Errorf("multiple field mark has primary keys are not supported") } t.PrimaryKey = &PrimaryKey{ diff --git a/db_proto/sql/semantic_types.go b/db_proto/sql/semantic_types.go new file mode 100644 index 0000000..ddd8245 --- /dev/null +++ b/db_proto/sql/semantic_types.go @@ -0,0 +1,156 @@ +package sql + +import "fmt" + +// SemanticType represents a high-level semantic meaning for a field +// that can be mapped to optimal SQL types per database dialect +type SemanticType string + +const ( + // Blockchain/Crypto types + SemanticUint256 SemanticType = "uint256" // 256-bit unsigned integer + SemanticInt256 SemanticType = "int256" // 256-bit signed integer + SemanticAddress SemanticType = "address" // Blockchain address (42 chars with 0x prefix) + SemanticHash SemanticType = "hash" // Cryptographic hash (66 chars with 0x prefix) + SemanticSignature SemanticType = "signature" // Cryptographic signature + SemanticPubkey SemanticType 
= "pubkey" // Public key + + + // Text/Binary types + SemanticHex SemanticType = "hex" // Hexadecimal string + SemanticBase64 SemanticType = "base64" // Base64 encoded data + SemanticJSON SemanticType = "json" // JSON data + SemanticUUID SemanticType = "uuid" // UUID string + + // Time types + SemanticUnixTimestamp SemanticType = "unix_timestamp" // Unix timestamp (seconds) + SemanticUnixTimestampMS SemanticType = "unix_timestamp_ms" // Unix timestamp (milliseconds) + SemanticBlockTimestamp SemanticType = "block_timestamp" // Blockchain timestamp +) + +// SemanticTypeInfo contains metadata about a semantic type +type SemanticTypeInfo struct { + Name SemanticType + Description string + DefaultSQL string // Fallback SQL type when dialect doesn't support it + Validation string // Optional validation pattern/rules +} + +// SemanticTypeRegistry contains all supported semantic types with their metadata +var SemanticTypeRegistry = map[SemanticType]SemanticTypeInfo{ + SemanticUint256: { + Name: SemanticUint256, + Description: "256-bit unsigned integer for large blockchain values", + DefaultSQL: "VARCHAR", // Safe fallback for unsupported dialects + Validation: "numeric", + }, + SemanticInt256: { + Name: SemanticInt256, + Description: "256-bit signed integer for large blockchain values", + DefaultSQL: "VARCHAR", + Validation: "numeric", + }, + SemanticAddress: { + Name: SemanticAddress, + Description: "Blockchain address (42 characters with 0x prefix)", + DefaultSQL: "VARCHAR(42)", + Validation: "^0x[a-fA-F0-9]{40}$", + }, + SemanticHash: { + Name: SemanticHash, + Description: "Cryptographic hash (66 characters with 0x prefix)", + DefaultSQL: "VARCHAR(66)", + Validation: "^0x[a-fA-F0-9]{64}$", + }, + SemanticSignature: { + Name: SemanticSignature, + Description: "Cryptographic signature (variable length hex)", + DefaultSQL: "VARCHAR", + Validation: "hex", + }, + SemanticPubkey: { + Name: SemanticPubkey, + Description: "Public key (variable length hex)", + DefaultSQL: 
"VARCHAR", + Validation: "hex", + }, + SemanticHex: { + Name: SemanticHex, + Description: "Hexadecimal string data", + DefaultSQL: "VARCHAR", + Validation: "hex", + }, + SemanticBase64: { + Name: SemanticBase64, + Description: "Base64 encoded binary data", + DefaultSQL: "TEXT", + Validation: "base64", + }, + SemanticJSON: { + Name: SemanticJSON, + Description: "JSON structured data", + DefaultSQL: "TEXT", + Validation: "json", + }, + SemanticUUID: { + Name: SemanticUUID, + Description: "UUID identifier", + DefaultSQL: "VARCHAR(36)", + Validation: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", + }, + SemanticUnixTimestamp: { + Name: SemanticUnixTimestamp, + Description: "Unix timestamp in seconds", + DefaultSQL: "TIMESTAMP WITH TIME ZONE", + Validation: "unix_timestamp", + }, + SemanticUnixTimestampMS: { + Name: SemanticUnixTimestampMS, + Description: "Unix timestamp in milliseconds", + DefaultSQL: "TIMESTAMP WITH TIME ZONE", + Validation: "unix_timestamp_ms", + }, + SemanticBlockTimestamp: { + Name: SemanticBlockTimestamp, + Description: "Blockchain block timestamp", + DefaultSQL: "TIMESTAMP WITH TIME ZONE", + Validation: "unix_timestamp", + }, +} + +// IsValidSemanticType checks if a semantic type is supported +func IsValidSemanticType(semanticType string) bool { + _, exists := SemanticTypeRegistry[SemanticType(semanticType)] + return exists +} + +// GetSemanticTypeInfo returns metadata for a semantic type +func GetSemanticTypeInfo(semanticType string) (SemanticTypeInfo, error) { + info, exists := SemanticTypeRegistry[SemanticType(semanticType)] + if !exists { + return SemanticTypeInfo{}, fmt.Errorf("unsupported semantic type: %s", semanticType) + } + return info, nil +} + +// SemanticTypeMapper defines the interface for dialect-specific semantic type mapping +type SemanticTypeMapper interface { + // MapSemanticType maps a semantic type to dialect-specific SQL type + MapSemanticType(semanticType SemanticType) (sqlType string, 
supported bool) + + // ConvertValue converts a value according to semantic type and format hint + ConvertValue(semanticType SemanticType, value interface{}, formatHint string) (string, error) + + // SupportsSemanticType returns true if the dialect supports the semantic type + SupportsSemanticType(semanticType SemanticType) bool +} + +// FormatHint provides guidance for value conversion +type FormatHint string + +const ( + FormatHintHex FormatHint = "hex" // Hexadecimal format + FormatHintDecimal FormatHint = "decimal" // Decimal format + FormatHintBase64 FormatHint = "base64" // Base64 format + FormatHintString FormatHint = "string" // String format +) \ No newline at end of file diff --git a/db_proto/test/substreams/order/proto/test/relations/relations.proto b/db_proto/test/substreams/order/proto/test/relations/relations.proto index 8ee2b5e..bf651ad 100644 --- a/db_proto/test/substreams/order/proto/test/relations/relations.proto +++ b/db_proto/test/substreams/order/proto/test/relations/relations.proto @@ -25,9 +25,15 @@ message Entity { } message TypesTest { - option (sf.substreams.sink.sql.schema.v1.table) = { name: "types_tests" }; + option (schema.table) = { + name: "types_tests" + clickhouse_table_options: { + order_by_fields: [{name: "id"}] + } + }; + - uint64 id =1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; + uint64 id =1 [(schema.field) = { primary_key: true }]; // Field for each protobuf native type double double_field = 2; float float_field = 3; @@ -50,45 +56,74 @@ message TypesTest { } message Customer { - option (sf.substreams.sink.sql.schema.v1.table) = { name: "customers" }; + option (schema.table) = { + name: "customers" + clickhouse_table_options: { + order_by_fields: [{name: "customer_id"}] + } + }; + - string customer_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; + string customer_id = 1 [(schema.field) = { primary_key: true }]; string name = 2; } message Order { - option 
(sf.substreams.sink.sql.schema.v1.table) = { + option (schema.table) = { name: "orders" + clickhouse_table_options: { + order_by_fields: [{name: "order_id"}, {name: "customer_ref_id"}] + index_fields: [{ + field_name: "order_id" + name: "order_id_idx" + type: bloom_filter + granularity: 4 + }] + } }; - string order_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true}]; - string customer_ref_id = 2 [(sf.substreams.sink.sql.schema.v1.field) = { foreign_key: "customers on customer_id"}]; + + + string order_id = 1 [(schema.field) = { primary_key: true}]; + string customer_ref_id = 2 [(schema.field) = { foreign_key: "customers on customer_id"}]; repeated OrderItem items = 3; OrderExtension extension = 4; } message OrderExtension { - option (sf.substreams.sink.sql.schema.v1.table) = { + option (schema.table) = { name: "order_extensions", child_of: "orders on order_id" + clickhouse_table_options: { + order_by_fields: [{name: "order_id"}] + } }; string description = 1; } message OrderItem { - option (sf.substreams.sink.sql.schema.v1.table) = { + option (schema.table) = { name: "order_items", child_of: "orders on order_id" + clickhouse_table_options: { + order_by_fields: [{name: "order_id"}, {name: "item_id"}] + } }; // can also leverage orders._id using "order on order_id" if order do not have a external unique identifier - string item_id = 2 [(sf.substreams.sink.sql.schema.v1.field) = { foreign_key: "items on item_id"}]; + string item_id = 2 [(schema.field) = { foreign_key: "items on item_id"}]; int64 quantity = 11; } message Item { - option (sf.substreams.sink.sql.schema.v1.table) = { name: "items" }; + option (schema.table) = { + name: "items" + clickhouse_table_options: { + order_by_fields: [{name: "item_id"}] + } + }; + - string item_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { unique: true }]; + string item_id = 1 [(schema.field) = { unique: true }]; string name = 10; double price = 11; diff --git 
a/db_proto/test/substreams/order/src/pb/mod.rs b/db_proto/test/substreams/order/src/pb/mod.rs index 66fa561..5e3d718 100644 --- a/db_proto/test/substreams/order/src/pb/mod.rs +++ b/db_proto/test/substreams/order/src/pb/mod.rs @@ -1,4 +1,9 @@ // @generated +// @@protoc_insertion_point(attribute:schema) +pub mod schema { + include!("schema.rs"); + // @@protoc_insertion_point(schema) +} pub mod sf { pub mod solana { pub mod r#type { @@ -13,17 +18,6 @@ pub mod sf { pub mod substreams { include!("sf.substreams.rs"); // @@protoc_insertion_point(sf.substreams) - pub mod sink { - pub mod sql { - pub mod schema { - // @@protoc_insertion_point(attribute:sf.substreams.sink.sql.schema.v1) - pub mod v1 { - include!("sf.substreams.sink.sql.schema.v1.rs"); - // @@protoc_insertion_point(sf.substreams.sink.sql.schema.v1) - } - } - } - } pub mod solana { // @@protoc_insertion_point(attribute:sf.substreams.solana.v1) pub mod v1 { diff --git a/db_proto/test/substreams/order/src/pb/schema.rs b/db_proto/test/substreams/order/src/pb/schema.rs index 103b84c..55acc40 100644 --- a/db_proto/test/substreams/order/src/pb/schema.rs +++ b/db_proto/test/substreams/order/src/pb/schema.rs @@ -7,9 +7,8 @@ pub struct Table { pub name: ::prost::alloc::string::String, #[prost(string, optional, tag="2")] pub child_of: ::core::option::Option<::prost::alloc::string::String>, - /// should be remove - #[prost(string, tag="81")] - pub many_to_one_relation_field_name: ::prost::alloc::string::String, + #[prost(message, optional, tag="200")] + pub clickhouse_table_options: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -23,4 +22,130 @@ pub struct Column { #[prost(bool, tag="4")] pub primary_key: bool, } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClickhouseTableOptions { + #[prost(message, repeated, tag="1")] + pub order_by_fields: ::prost::alloc::vec::Vec, + #[prost(message, 
repeated, tag="2")] + pub partition_fields: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="3")] + pub replacing_fields: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="4")] + pub index_fields: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClickhousePartitionByField { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + #[prost(enumeration="Function", tag="2")] + pub function: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClickhouseOrderByField { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + #[prost(bool, tag="2")] + pub descending: bool, + #[prost(enumeration="Function", tag="3")] + pub function: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClickhouseReplacingField { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClickhouseIndexField { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub field_name: ::prost::alloc::string::String, + #[prost(enumeration="IndexType", tag="3")] + pub r#type: i32, + #[prost(uint32, tag="4")] + pub granularity: u32, + #[prost(enumeration="Function", tag="5")] + pub function: i32, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum IndexType { + Minmax = 0, + Set = 1, + NgrambfV1 = 2, + TokenbfV1 = 3, + BloomFilter = 4, +} +impl IndexType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + IndexType::Minmax => "minmax", + IndexType::Set => "set", + IndexType::NgrambfV1 => "ngrambf_v1", + IndexType::TokenbfV1 => "tokenbf_v1", + IndexType::BloomFilter => "bloom_filter", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "minmax" => Some(Self::Minmax), + "set" => Some(Self::Set), + "ngrambf_v1" => Some(Self::NgrambfV1), + "tokenbf_v1" => Some(Self::TokenbfV1), + "bloom_filter" => Some(Self::BloomFilter), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Function { + Unset = 0, + ToYyyymm = 1, + ToYyyydd = 2, + ToYear = 3, + ToMonth = 4, + ToDate = 5, + ToStartOfMonth = 6, +} +impl Function { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Function::Unset => "unset", + Function::ToYyyymm => "toYYYYMM", + Function::ToYyyydd => "toYYYYDD", + Function::ToYear => "toYear", + Function::ToMonth => "toMonth", + Function::ToDate => "toDate", + Function::ToStartOfMonth => "toStartOfMonth", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "unset" => Some(Self::Unset), + "toYYYYMM" => Some(Self::ToYyyymm), + "toYYYYDD" => Some(Self::ToYyyydd), + "toYear" => Some(Self::ToYear), + "toMonth" => Some(Self::ToMonth), + "toDate" => Some(Self::ToDate), + "toStartOfMonth" => Some(Self::ToStartOfMonth), + _ => None, + } + } +} // @@protoc_insertion_point(module) diff --git a/docker-compose.yml b/docker-compose.yml index 96d225a..6874e8e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -46,3 +46,20 @@ services: - "8123:8123" - "9000:9000" - "9005:9005" + risingwave: + container_name: risingwave-ssp2 + image: risingwavelabs/risingwave:latest + hostname: risingwave + command: ["playground"] + environment: + - RUST_BACKTRACE=1 + ports: + - "4566:4566" # PostgreSQL wire protocol port + - "5691:5691" # RisingWave dashboard + volumes: + - ./devel/data/risingwave:/risingwave/data + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:5691/api/v1/health"] + interval: 30s + timeout: 10s + retries: 15 diff --git a/docs/tutorial/schema.risingwave.sql b/docs/tutorial/schema.risingwave.sql new file mode 100644 index 0000000..375f092 --- /dev/null +++ b/docs/tutorial/schema.risingwave.sql @@ -0,0 +1,29 @@ +-- RisingWave optimized schema for blockchain data +CREATE TABLE IF NOT EXISTS block_meta ( + id VARCHAR NOT NULL PRIMARY KEY, + at TIMESTAMP WITH TIME ZONE, + number BIGINT, + hash VARCHAR, + parent_hash VARCHAR, + timestamp TIMESTAMP WITH TIME ZONE +); + +-- Cursor table for Substreams state management +CREATE TABLE IF NOT EXISTS cursors ( + id VARCHAR NOT NULL PRIMARY KEY, + cursor VARCHAR, + block_num BIGINT, + block_id VARCHAR +); + +-- Optional: Create a materialized view for real-time analytics +-- This demonstrates RisingWave's streaming capabilities +CREATE MATERIALIZED VIEW IF NOT EXISTS block_stats AS +SELECT + DATE_TRUNC('hour', timestamp) as hour, + COUNT(*) as block_count, + MIN(number) as min_block, + 
MAX(number) as max_block, + AVG(EXTRACT(EPOCH FROM (timestamp - LAG(timestamp) OVER (ORDER BY number)))) as avg_block_time +FROM block_meta +GROUP BY DATE_TRUNC('hour', timestamp); \ No newline at end of file diff --git a/docs/tutorial/substreams.risingwave.yaml b/docs/tutorial/substreams.risingwave.yaml new file mode 100644 index 0000000..94c8ce3 --- /dev/null +++ b/docs/tutorial/substreams.risingwave.yaml @@ -0,0 +1,49 @@ +specVersion: v0.1.0 +package: + name: 'substreams_risingwave_sink_tutorial' + version: v0.1.0 + +protobuf: + files: + - block_meta.proto + importPaths: + - ./proto + +imports: + sql: https://github.com/streamingfast/substreams-sink-sql/releases/download/protodefs-v1.0.3/substreams-sink-sql-protodefs-v1.0.3.spkg + blockmeta: https://github.com/streamingfast/substreams-eth-block-meta/releases/download/v0.5.1/substreams-eth-block-meta-v0.5.1.spkg + +binaries: + default: + type: wasm/rust-v1 + file: target/wasm32-unknown-unknown/release/substreams_postgresql_sink_tutorial.wasm + +modules: + - name: store_block_meta_start + kind: store + updatePolicy: set_if_not_exists + valueType: proto:eth.block_meta.v1.BlockMeta + inputs: + - source: sf.ethereum.type.v2.Block + + - name: db_out + kind: map + inputs: + - store: store_block_meta_start + mode: deltas + output: + type: proto:sf.substreams.sink.database.v1.DatabaseChanges + +network: mainnet + +sink: + module: db_out + type: sf.substreams.sink.sql.v1.Service + config: + schema: "./schema.risingwave.sql" + engine: risingwave + wire_protocol_access: true + postgraphile_frontend: + enabled: false + pgweb_frontend: + enabled: false \ No newline at end of file diff --git a/pb/sf/substreams/sink/sql/schema/v1/schema.pb.go b/pb/sf/substreams/sink/sql/schema/v1/schema.pb.go index 3341533..8042fe3 100644 --- a/pb/sf/substreams/sink/sql/schema/v1/schema.pb.go +++ b/pb/sf/substreams/sink/sql/schema/v1/schema.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.36.6 // protoc (unknown) // source: sf/substreams/sink/sql/schema/v1/schema.proto -package schema +package v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -12,6 +12,7 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,24 +22,136 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type Table struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type IndexType int32 + +const ( + IndexType_minmax IndexType = 0 + IndexType_set IndexType = 1 + IndexType_ngrambf_v1 IndexType = 2 + IndexType_tokenbf_v1 IndexType = 3 + IndexType_bloom_filter IndexType = 4 +) + +// Enum value maps for IndexType. +var ( + IndexType_name = map[int32]string{ + 0: "minmax", + 1: "set", + 2: "ngrambf_v1", + 3: "tokenbf_v1", + 4: "bloom_filter", + } + IndexType_value = map[string]int32{ + "minmax": 0, + "set": 1, + "ngrambf_v1": 2, + "tokenbf_v1": 3, + "bloom_filter": 4, + } +) + +func (x IndexType) Enum() *IndexType { + p := new(IndexType) + *p = x + return p +} + +func (x IndexType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IndexType) Descriptor() protoreflect.EnumDescriptor { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_enumTypes[0].Descriptor() +} + +func (IndexType) Type() protoreflect.EnumType { + return &file_sf_substreams_sink_sql_schema_v1_schema_proto_enumTypes[0] +} + +func (x IndexType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - ChildOf *string `protobuf:"bytes,2,opt,name=child_of,json=childOf,proto3,oneof" json:"child_of,omitempty"` - // should be remove - ManyToOneRelationFieldName string 
`protobuf:"bytes,81,opt,name=many_to_one_relation_field_name,json=manyToOneRelationFieldName,proto3" json:"many_to_one_relation_field_name,omitempty"` +// Deprecated: Use IndexType.Descriptor instead. +func (IndexType) EnumDescriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{0} +} + +type Function int32 + +const ( + Function_unset Function = 0 + Function_toYYYYMM Function = 1 + Function_toYYYYDD Function = 2 + Function_toYear Function = 3 + Function_toMonth Function = 4 + Function_toDate Function = 5 + Function_toStartOfMonth Function = 6 +) + +// Enum value maps for Function. +var ( + Function_name = map[int32]string{ + 0: "unset", + 1: "toYYYYMM", + 2: "toYYYYDD", + 3: "toYear", + 4: "toMonth", + 5: "toDate", + 6: "toStartOfMonth", + } + Function_value = map[string]int32{ + "unset": 0, + "toYYYYMM": 1, + "toYYYYDD": 2, + "toYear": 3, + "toMonth": 4, + "toDate": 5, + "toStartOfMonth": 6, + } +) + +func (x Function) Enum() *Function { + p := new(Function) + *p = x + return p +} + +func (x Function) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Function) Descriptor() protoreflect.EnumDescriptor { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_enumTypes[1].Descriptor() +} + +func (Function) Type() protoreflect.EnumType { + return &file_sf_substreams_sink_sql_schema_v1_schema_proto_enumTypes[1] +} + +func (x Function) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Function.Descriptor instead. 
+func (Function) EnumDescriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{1} +} + +type Table struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ChildOf *string `protobuf:"bytes,2,opt,name=child_of,json=childOf,proto3,oneof" json:"child_of,omitempty"` // repeated string primary_key_fields = 3; + ClickhouseTableOptions *ClickhouseTableOptions `protobuf:"bytes,200,opt,name=clickhouse_table_options,json=clickhouseTableOptions,proto3,oneof" json:"clickhouse_table_options,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Table) Reset() { *x = Table{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Table) String() string { @@ -49,7 +162,7 @@ func (*Table) ProtoMessage() {} func (x *Table) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -78,31 +191,31 @@ func (x *Table) GetChildOf() string { return "" } -func (x *Table) GetManyToOneRelationFieldName() string { +func (x *Table) GetClickhouseTableOptions() *ClickhouseTableOptions { if x != nil { - return x.ManyToOneRelationFieldName + return x.ClickhouseTableOptions } - return "" + return nil } type Column struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name,proto3,oneof" 
json:"name,omitempty"` + ForeignKey *string `protobuf:"bytes,2,opt,name=foreign_key,json=foreignKey,proto3,oneof" json:"foreign_key,omitempty"` + Unique bool `protobuf:"varint,3,opt,name=unique,proto3" json:"unique,omitempty"` + PrimaryKey bool `protobuf:"varint,4,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"` + // Semantic type annotation for custom SQL type mapping + SemanticType *string `protobuf:"bytes,5,opt,name=semantic_type,json=semanticType,proto3,oneof" json:"semantic_type,omitempty"` // Semantic type hint (e.g., "uint256", "address") + FormatHint *string `protobuf:"bytes,6,opt,name=format_hint,json=formatHint,proto3,oneof" json:"format_hint,omitempty"` // Format hint for conversion (e.g., "hex", "decimal") unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name,proto3,oneof" json:"name,omitempty"` - ForeignKey *string `protobuf:"bytes,2,opt,name=foreign_key,json=foreignKey,proto3,oneof" json:"foreign_key,omitempty"` - Unique bool `protobuf:"varint,3,opt,name=unique,proto3" json:"unique,omitempty"` - PrimaryKey bool `protobuf:"varint,4,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Column) Reset() { *x = Column{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Column) String() string { @@ -113,7 +226,7 @@ func (*Column) ProtoMessage() {} func (x *Column) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -156,12 +269,326 @@ func (x *Column) GetPrimaryKey() bool { return false } +func (x *Column) GetSemanticType() string { + if x != nil && x.SemanticType != nil { + return *x.SemanticType + } + return "" +} + +func (x *Column) GetFormatHint() string { + if x != nil && x.FormatHint != nil { + return *x.FormatHint + } + return "" +} + +type ClickhouseTableOptions struct { + state protoimpl.MessageState `protogen:"open.v1"` + OrderByFields []*ClickhouseOrderByField `protobuf:"bytes,1,rep,name=order_by_fields,json=orderByFields,proto3" json:"order_by_fields,omitempty"` + PartitionFields []*ClickhousePartitionByField `protobuf:"bytes,2,rep,name=partition_fields,json=partitionFields,proto3" json:"partition_fields,omitempty"` + ReplacingFields []*ClickhouseReplacingField `protobuf:"bytes,3,rep,name=replacing_fields,json=replacingFields,proto3" json:"replacing_fields,omitempty"` + IndexFields []*ClickhouseIndexField `protobuf:"bytes,4,rep,name=index_fields,json=indexFields,proto3" json:"index_fields,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClickhouseTableOptions) Reset() { + *x = ClickhouseTableOptions{} + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClickhouseTableOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClickhouseTableOptions) ProtoMessage() {} + +func (x *ClickhouseTableOptions) ProtoReflect() protoreflect.Message { + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClickhouseTableOptions.ProtoReflect.Descriptor instead. 
+func (*ClickhouseTableOptions) Descriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{2} +} + +func (x *ClickhouseTableOptions) GetOrderByFields() []*ClickhouseOrderByField { + if x != nil { + return x.OrderByFields + } + return nil +} + +func (x *ClickhouseTableOptions) GetPartitionFields() []*ClickhousePartitionByField { + if x != nil { + return x.PartitionFields + } + return nil +} + +func (x *ClickhouseTableOptions) GetReplacingFields() []*ClickhouseReplacingField { + if x != nil { + return x.ReplacingFields + } + return nil +} + +func (x *ClickhouseTableOptions) GetIndexFields() []*ClickhouseIndexField { + if x != nil { + return x.IndexFields + } + return nil +} + +type ClickhousePartitionByField struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Function Function `protobuf:"varint,2,opt,name=function,proto3,enum=sf.substreams.sink.sql.schema.v1.Function" json:"function,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClickhousePartitionByField) Reset() { + *x = ClickhousePartitionByField{} + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClickhousePartitionByField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClickhousePartitionByField) ProtoMessage() {} + +func (x *ClickhousePartitionByField) ProtoReflect() protoreflect.Message { + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClickhousePartitionByField.ProtoReflect.Descriptor instead. 
+func (*ClickhousePartitionByField) Descriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{3} +} + +func (x *ClickhousePartitionByField) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClickhousePartitionByField) GetFunction() Function { + if x != nil { + return x.Function + } + return Function_unset +} + +type ClickhouseOrderByField struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Descending bool `protobuf:"varint,2,opt,name=descending,proto3" json:"descending,omitempty"` + Function Function `protobuf:"varint,3,opt,name=function,proto3,enum=sf.substreams.sink.sql.schema.v1.Function" json:"function,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClickhouseOrderByField) Reset() { + *x = ClickhouseOrderByField{} + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClickhouseOrderByField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClickhouseOrderByField) ProtoMessage() {} + +func (x *ClickhouseOrderByField) ProtoReflect() protoreflect.Message { + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClickhouseOrderByField.ProtoReflect.Descriptor instead. 
+func (*ClickhouseOrderByField) Descriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{4} +} + +func (x *ClickhouseOrderByField) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClickhouseOrderByField) GetDescending() bool { + if x != nil { + return x.Descending + } + return false +} + +func (x *ClickhouseOrderByField) GetFunction() Function { + if x != nil { + return x.Function + } + return Function_unset +} + +type ClickhouseReplacingField struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClickhouseReplacingField) Reset() { + *x = ClickhouseReplacingField{} + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClickhouseReplacingField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClickhouseReplacingField) ProtoMessage() {} + +func (x *ClickhouseReplacingField) ProtoReflect() protoreflect.Message { + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClickhouseReplacingField.ProtoReflect.Descriptor instead. 
+func (*ClickhouseReplacingField) Descriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{5} +} + +func (x *ClickhouseReplacingField) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type ClickhouseIndexField struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + FieldName string `protobuf:"bytes,2,opt,name=field_name,json=fieldName,proto3" json:"field_name,omitempty"` + Type IndexType `protobuf:"varint,3,opt,name=type,proto3,enum=sf.substreams.sink.sql.schema.v1.IndexType" json:"type,omitempty"` + Granularity uint32 `protobuf:"varint,4,opt,name=granularity,proto3" json:"granularity,omitempty"` + Function Function `protobuf:"varint,5,opt,name=function,proto3,enum=sf.substreams.sink.sql.schema.v1.Function" json:"function,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClickhouseIndexField) Reset() { + *x = ClickhouseIndexField{} + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClickhouseIndexField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClickhouseIndexField) ProtoMessage() {} + +func (x *ClickhouseIndexField) ProtoReflect() protoreflect.Message { + mi := &file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClickhouseIndexField.ProtoReflect.Descriptor instead. 
+func (*ClickhouseIndexField) Descriptor() ([]byte, []int) { + return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP(), []int{6} +} + +func (x *ClickhouseIndexField) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClickhouseIndexField) GetFieldName() string { + if x != nil { + return x.FieldName + } + return "" +} + +func (x *ClickhouseIndexField) GetType() IndexType { + if x != nil { + return x.Type + } + return IndexType_minmax +} + +func (x *ClickhouseIndexField) GetGranularity() uint32 { + if x != nil { + return x.Granularity + } + return 0 +} + +func (x *ClickhouseIndexField) GetFunction() Function { + if x != nil { + return x.Function + } + return Function_unset +} + var file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MessageOptions)(nil), ExtensionType: (*Table)(nil), Field: 77701, - Name: "schema.table", + Name: "sf.substreams.sink.sql.schema.v1.table", Tag: "bytes,77701,opt,name=table", Filename: "sf/substreams/sink/sql/schema/v1/schema.proto", }, @@ -169,7 +596,7 @@ var file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes = []protoimpl.Ex ExtendedType: (*descriptorpb.FieldOptions)(nil), ExtensionType: (*Column)(nil), Field: 77702, - Name: "schema.field", + Name: "sf.substreams.sink.sql.schema.v1.field", Tag: "bytes,77702,opt,name=field", Filename: "sf/substreams/sink/sql/schema/v1/schema.proto", }, @@ -177,93 +604,133 @@ var file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes = []protoimpl.Ex // Extension fields to descriptorpb.MessageOptions. var ( - // optional schema.Table table = 77701; + // optional sf.substreams.sink.sql.schema.v1.Table table = 77701; E_Table = &file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes[0] ) // Extension fields to descriptorpb.FieldOptions. 
var ( - // optional schema.Column field = 77702; - E_Field = &file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes[1] //todo: ignore + // optional sf.substreams.sink.sql.schema.v1.Column field = 77702; + E_Field = &file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes[1] ) var File_sf_substreams_sink_sql_schema_v1_schema_proto protoreflect.FileDescriptor -var file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc = []byte{ - 0x0a, 0x2d, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, - 0x73, 0x69, 0x6e, 0x6b, 0x2f, 0x73, 0x71, 0x6c, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2f, - 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, 0x0a, 0x05, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x5f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x4f, 0x66, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x1f, 0x6d, 0x61, 0x6e, 0x79, 0x5f, - 0x74, 0x6f, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x51, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1a, 0x6d, 0x61, 0x6e, 0x79, 0x54, 0x6f, 0x4f, 0x6e, 0x65, 0x52, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0b, 0x0a, 0x09, - 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6f, 0x66, 0x22, 0x99, 0x01, 0x0a, 0x06, 0x43, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 
0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, - 0x0b, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x01, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, - 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x42, 0x07, 0x0a, 0x05, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, - 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x3a, 0x46, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x85, 0xdf, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x45, 0x0a, - 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x86, 0xdf, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x42, 0x89, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x42, 0x0b, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, - 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, 0x62, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2d, 0x73, 0x69, 0x6e, 0x6b, 0x2d, 0x73, 0x71, 0x6c, - 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, - 0xaa, 0x02, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0xca, 0x02, 0x06, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0xe2, 0x02, 0x12, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc = "" + + "\n" + + "-sf/substreams/sink/sql/schema/v1/schema.proto\x12 sf.substreams.sink.sql.schema.v1\x1a google/protobuf/descriptor.proto\"\xdf\x01\n" + + "\x05Table\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1e\n" + + "\bchild_of\x18\x02 \x01(\tH\x00R\achildOf\x88\x01\x01\x12x\n" + + "\x18clickhouse_table_options\x18\xc8\x01 \x01(\v28.sf.substreams.sink.sql.schema.v1.ClickhouseTableOptionsH\x01R\x16clickhouseTableOptions\x88\x01\x01B\v\n" + + "\t_child_ofB\x1b\n" + + "\x19_clickhouse_table_options\"\x8b\x02\n" + + "\x06Column\x12\x17\n" + + "\x04name\x18\x01 \x01(\tH\x00R\x04name\x88\x01\x01\x12$\n" + + "\vforeign_key\x18\x02 \x01(\tH\x01R\n" + + "foreignKey\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x03 \x01(\bR\x06unique\x12\x1f\n" + + "\vprimary_key\x18\x04 \x01(\bR\n" + + "primaryKey\x12(\n" + + "\rsemantic_type\x18\x05 \x01(\tH\x02R\fsemanticType\x88\x01\x01\x12$\n" + + "\vformat_hint\x18\x06 \x01(\tH\x03R\n" + + "formatHint\x88\x01\x01B\a\n" + + "\x05_nameB\x0e\n" + + "\f_foreign_keyB\x10\n" + + "\x0e_semantic_typeB\x0e\n" + + "\f_format_hint\"\xa5\x03\n" + + "\x16ClickhouseTableOptions\x12`\n" + + "\x0forder_by_fields\x18\x01 \x03(\v28.sf.substreams.sink.sql.schema.v1.ClickhouseOrderByFieldR\rorderByFields\x12g\n" + + "\x10partition_fields\x18\x02 
\x03(\v2<.sf.substreams.sink.sql.schema.v1.ClickhousePartitionByFieldR\x0fpartitionFields\x12e\n" + + "\x10replacing_fields\x18\x03 \x03(\v2:.sf.substreams.sink.sql.schema.v1.ClickhouseReplacingFieldR\x0freplacingFields\x12Y\n" + + "\findex_fields\x18\x04 \x03(\v26.sf.substreams.sink.sql.schema.v1.ClickhouseIndexFieldR\vindexFields\"x\n" + + "\x1aClickhousePartitionByField\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12F\n" + + "\bfunction\x18\x02 \x01(\x0e2*.sf.substreams.sink.sql.schema.v1.FunctionR\bfunction\"\x94\x01\n" + + "\x16ClickhouseOrderByField\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1e\n" + + "\n" + + "descending\x18\x02 \x01(\bR\n" + + "descending\x12F\n" + + "\bfunction\x18\x03 \x01(\x0e2*.sf.substreams.sink.sql.schema.v1.FunctionR\bfunction\".\n" + + "\x18ClickhouseReplacingField\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"\xf4\x01\n" + + "\x14ClickhouseIndexField\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "field_name\x18\x02 \x01(\tR\tfieldName\x12?\n" + + "\x04type\x18\x03 \x01(\x0e2+.sf.substreams.sink.sql.schema.v1.IndexTypeR\x04type\x12 \n" + + "\vgranularity\x18\x04 \x01(\rR\vgranularity\x12F\n" + + "\bfunction\x18\x05 \x01(\x0e2*.sf.substreams.sink.sql.schema.v1.FunctionR\bfunction*R\n" + + "\tIndexType\x12\n" + + "\n" + + "\x06minmax\x10\x00\x12\a\n" + + "\x03set\x10\x01\x12\x0e\n" + + "\n" + + "ngrambf_v1\x10\x02\x12\x0e\n" + + "\n" + + "tokenbf_v1\x10\x03\x12\x10\n" + + "\fbloom_filter\x10\x04*j\n" + + "\bFunction\x12\t\n" + + "\x05unset\x10\x00\x12\f\n" + + "\btoYYYYMM\x10\x01\x12\f\n" + + "\btoYYYYDD\x10\x02\x12\n" + + "\n" + + "\x06toYear\x10\x03\x12\v\n" + + "\atoMonth\x10\x04\x12\n" + + "\n" + + "\x06toDate\x10\x05\x12\x12\n" + + "\x0etoStartOfMonth\x10\x06:`\n" + + "\x05table\x12\x1f.google.protobuf.MessageOptions\x18\x85\xdf\x04 \x01(\v2'.sf.substreams.sink.sql.schema.v1.TableR\x05table:_\n" + + "\x05field\x12\x1d.google.protobuf.FieldOptions\x18\x86\xdf\x04 
\x01(\v2(.sf.substreams.sink.sql.schema.v1.ColumnR\x05fieldB\xac\x02\n" + + "$com.sf.substreams.sink.sql.schema.v1B\vSchemaProtoP\x01ZPgithub.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1\xa2\x02\x05SSSSS\xaa\x02 Sf.Substreams.Sink.Sql.Schema.V1\xca\x02 Sf\\Substreams\\Sink\\Sql\\Schema\\V1\xe2\x02,Sf\\Substreams\\Sink\\Sql\\Schema\\V1\\GPBMetadata\xea\x02%Sf::Substreams::Sink::Sql::Schema::V1b\x06proto3" var ( file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescOnce sync.Once - file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescData = file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc + file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescData []byte ) func file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescGZIP() []byte { file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescOnce.Do(func() { - file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescData = protoimpl.X.CompressGZIP(file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescData) + file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc), len(file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc))) }) return file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDescData } -var file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_sf_substreams_sink_sql_schema_v1_schema_proto_goTypes = []interface{}{ - (*Table)(nil), // 0: schema.Table - (*Column)(nil), // 1: schema.Column - (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions - (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions +var file_sf_substreams_sink_sql_schema_v1_schema_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var 
file_sf_substreams_sink_sql_schema_v1_schema_proto_goTypes = []any{ + (IndexType)(0), // 0: sf.substreams.sink.sql.schema.v1.IndexType + (Function)(0), // 1: sf.substreams.sink.sql.schema.v1.Function + (*Table)(nil), // 2: sf.substreams.sink.sql.schema.v1.Table + (*Column)(nil), // 3: sf.substreams.sink.sql.schema.v1.Column + (*ClickhouseTableOptions)(nil), // 4: sf.substreams.sink.sql.schema.v1.ClickhouseTableOptions + (*ClickhousePartitionByField)(nil), // 5: sf.substreams.sink.sql.schema.v1.ClickhousePartitionByField + (*ClickhouseOrderByField)(nil), // 6: sf.substreams.sink.sql.schema.v1.ClickhouseOrderByField + (*ClickhouseReplacingField)(nil), // 7: sf.substreams.sink.sql.schema.v1.ClickhouseReplacingField + (*ClickhouseIndexField)(nil), // 8: sf.substreams.sink.sql.schema.v1.ClickhouseIndexField + (*descriptorpb.MessageOptions)(nil), // 9: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 10: google.protobuf.FieldOptions } var file_sf_substreams_sink_sql_schema_v1_schema_proto_depIdxs = []int32{ - 2, // 0: schema.table:extendee -> google.protobuf.MessageOptions - 3, // 1: schema.field:extendee -> google.protobuf.FieldOptions - 0, // 2: schema.table:type_name -> schema.Table - 1, // 3: schema.field:type_name -> schema.Column - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 2, // [2:4] is the sub-list for extension type_name - 0, // [0:2] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 4, // 0: sf.substreams.sink.sql.schema.v1.Table.clickhouse_table_options:type_name -> sf.substreams.sink.sql.schema.v1.ClickhouseTableOptions + 6, // 1: sf.substreams.sink.sql.schema.v1.ClickhouseTableOptions.order_by_fields:type_name -> sf.substreams.sink.sql.schema.v1.ClickhouseOrderByField + 5, // 2: sf.substreams.sink.sql.schema.v1.ClickhouseTableOptions.partition_fields:type_name -> sf.substreams.sink.sql.schema.v1.ClickhousePartitionByField + 7, // 3: 
sf.substreams.sink.sql.schema.v1.ClickhouseTableOptions.replacing_fields:type_name -> sf.substreams.sink.sql.schema.v1.ClickhouseReplacingField + 8, // 4: sf.substreams.sink.sql.schema.v1.ClickhouseTableOptions.index_fields:type_name -> sf.substreams.sink.sql.schema.v1.ClickhouseIndexField + 1, // 5: sf.substreams.sink.sql.schema.v1.ClickhousePartitionByField.function:type_name -> sf.substreams.sink.sql.schema.v1.Function + 1, // 6: sf.substreams.sink.sql.schema.v1.ClickhouseOrderByField.function:type_name -> sf.substreams.sink.sql.schema.v1.Function + 0, // 7: sf.substreams.sink.sql.schema.v1.ClickhouseIndexField.type:type_name -> sf.substreams.sink.sql.schema.v1.IndexType + 1, // 8: sf.substreams.sink.sql.schema.v1.ClickhouseIndexField.function:type_name -> sf.substreams.sink.sql.schema.v1.Function + 9, // 9: sf.substreams.sink.sql.schema.v1.table:extendee -> google.protobuf.MessageOptions + 10, // 10: sf.substreams.sink.sql.schema.v1.field:extendee -> google.protobuf.FieldOptions + 2, // 11: sf.substreams.sink.sql.schema.v1.table:type_name -> sf.substreams.sink.sql.schema.v1.Table + 3, // 12: sf.substreams.sink.sql.schema.v1.field:type_name -> sf.substreams.sink.sql.schema.v1.Column + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 11, // [11:13] is the sub-list for extension type_name + 9, // [9:11] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_sf_substreams_sink_sql_schema_v1_schema_proto_init() } @@ -271,51 +738,25 @@ func file_sf_substreams_sink_sql_schema_v1_schema_proto_init() { if File_sf_substreams_sink_sql_schema_v1_schema_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Table); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Column); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[0].OneofWrappers = []any{} + file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes[1].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc), len(file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc)), + NumEnums: 2, + NumMessages: 7, NumExtensions: 2, NumServices: 0, }, GoTypes: file_sf_substreams_sink_sql_schema_v1_schema_proto_goTypes, DependencyIndexes: file_sf_substreams_sink_sql_schema_v1_schema_proto_depIdxs, + EnumInfos: file_sf_substreams_sink_sql_schema_v1_schema_proto_enumTypes, MessageInfos: file_sf_substreams_sink_sql_schema_v1_schema_proto_msgTypes, ExtensionInfos: file_sf_substreams_sink_sql_schema_v1_schema_proto_extTypes, }.Build() File_sf_substreams_sink_sql_schema_v1_schema_proto = out.File - file_sf_substreams_sink_sql_schema_v1_schema_proto_rawDesc = nil file_sf_substreams_sink_sql_schema_v1_schema_proto_goTypes = nil file_sf_substreams_sink_sql_schema_v1_schema_proto_depIdxs = nil } diff --git a/pb/sf/substreams/sink/sql/services/v1/services.pb.go b/pb/sf/substreams/sink/sql/services/v1/services.pb.go index 8e95f5d..5ee6dc2 100644 --- 
a/pb/sf/substreams/sink/sql/services/v1/services.pb.go +++ b/pb/sf/substreams/sink/sql/services/v1/services.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.36.6 // protoc (unknown) // source: sf/substreams/sink/sql/services/v1/services.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -71,10 +72,7 @@ func (Service_Engine) EnumDescriptor() ([]byte, []int) { } type Service struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Containing both create table statements and index creation statements. Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` DbtConfig *DBTConfig `protobuf:"bytes,2,opt,name=dbt_config,json=dbtConfig,proto3,oneof" json:"dbt_config,omitempty"` @@ -82,15 +80,15 @@ type Service struct { PostgraphileFrontend *PostgraphileFrontend `protobuf:"bytes,5,opt,name=postgraphile_frontend,json=postgraphileFrontend,proto3" json:"postgraphile_frontend,omitempty"` Engine Service_Engine `protobuf:"varint,7,opt,name=engine,proto3,enum=sf.substreams.sink.sql.service.v1.Service_Engine" json:"engine,omitempty"` RestFrontend *RESTFrontend `protobuf:"bytes,8,opt,name=rest_frontend,json=restFrontend,proto3" json:"rest_frontend,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Service) Reset() { *x = Service{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Service) String() string { @@ -101,7 
+99,7 @@ func (*Service) ProtoMessage() {} func (x *Service) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -160,22 +158,19 @@ func (x *Service) GetRestFrontend() *RESTFrontend { // https://www.getdbt.com/product/what-is-dbt type DBTConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Files []byte `protobuf:"bytes,1,opt,name=files,proto3" json:"files,omitempty"` - RunIntervalSeconds int32 `protobuf:"varint,2,opt,name=run_interval_seconds,json=runIntervalSeconds,proto3" json:"run_interval_seconds,omitempty"` - Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Files []byte `protobuf:"bytes,1,opt,name=files,proto3" json:"files,omitempty"` + RunIntervalSeconds int32 `protobuf:"varint,2,opt,name=run_interval_seconds,json=runIntervalSeconds,proto3" json:"run_interval_seconds,omitempty"` + Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DBTConfig) Reset() { *x = DBTConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DBTConfig) String() string { @@ -186,7 +181,7 @@ func (*DBTConfig) ProtoMessage() {} func (x *DBTConfig) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[1] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -224,20 +219,17 @@ func (x *DBTConfig) GetEnabled() bool { // https://hasura.io/docs/latest/index/ type HasuraFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HasuraFrontend) Reset() { *x = HasuraFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HasuraFrontend) String() string { @@ -248,7 +240,7 @@ func (*HasuraFrontend) ProtoMessage() {} func (x *HasuraFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -272,20 +264,17 @@ func (x *HasuraFrontend) GetEnabled() bool { // https://www.graphile.org/postgraphile/ type PostgraphileFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x 
*PostgraphileFrontend) Reset() { *x = PostgraphileFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PostgraphileFrontend) String() string { @@ -296,7 +285,7 @@ func (*PostgraphileFrontend) ProtoMessage() {} func (x *PostgraphileFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -320,20 +309,17 @@ func (x *PostgraphileFrontend) GetEnabled() bool { // https://github.com/sosedoff/pgweb type PGWebFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PGWebFrontend) Reset() { *x = PGWebFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PGWebFrontend) String() string { @@ -344,7 +330,7 @@ func (*PGWebFrontend) ProtoMessage() {} func (x *PGWebFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[4] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -368,20 +354,17 @@ func (x *PGWebFrontend) GetEnabled() bool { // https://github.com/semiotic-ai/sql-wrapper type RESTFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RESTFrontend) Reset() { *x = RESTFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RESTFrontend) String() string { @@ -392,7 +375,7 @@ func (*RESTFrontend) ProtoMessage() {} func (x *RESTFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -416,103 +399,52 @@ func (x *RESTFrontend) GetEnabled() bool { var File_sf_substreams_sink_sql_services_v1_services_proto protoreflect.FileDescriptor -var file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc = []byte{ - 0x0a, 0x31, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, - 0x73, 0x69, 0x6e, 0x6b, 0x2f, 0x73, 0x71, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x70, 
0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x04, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x1e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x06, 0xc2, 0x89, 0x01, 0x02, 0x08, 0x01, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x50, 0x0a, 0x0a, 0x64, 0x62, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x42, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x09, 0x64, 0x62, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x88, 0x01, - 0x01, 0x12, 0x5a, 0x0a, 0x0f, 0x68, 0x61, 0x73, 0x75, 0x72, 0x61, 0x5f, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x73, 0x66, 0x2e, - 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, - 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x61, 0x73, 0x75, 0x72, 0x61, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x0e, 0x68, - 0x61, 0x73, 0x75, 0x72, 0x61, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x6c, 0x0a, - 0x15, 0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x6c, 0x65, 0x5f, 0x66, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x73, - 0x66, 0x2e, 0x73, 0x75, 0x62, 
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, - 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x6c, 0x65, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x14, 0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x69, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x06, 0x65, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x73, 0x66, - 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, - 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x06, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x54, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, - 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x45, 0x53, 0x54, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x0c, - 0x72, 0x65, 0x73, 0x74, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x22, 0x31, 0x0a, 0x06, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x75, 0x6e, 0x73, 0x65, 0x74, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x68, 0x6f, 0x75, 0x73, 0x65, 0x10, 0x02, 0x42, - 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x62, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x75, - 0x0a, 0x09, 0x44, 0x42, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1c, 0x0a, 0x05, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 
0x06, 0xc2, 0x89, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x75, 0x6e, - 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x72, 0x75, 0x6e, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x61, 0x73, 0x75, 0x72, 0x61, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x22, 0x30, 0x0a, 0x14, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x6c, - 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0d, 0x50, 0x47, 0x57, 0x65, 0x62, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x28, - 0x0a, 0x0c, 0x52, 0x45, 0x53, 0x54, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x18, - 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x98, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, - 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, - 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x42, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 
0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x73, 0x75, - 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2d, 0x73, 0x69, 0x6e, 0x6b, 0x2d, 0x73, 0x71, - 0x6c, 0x2f, 0x70, 0x62, 0x3b, 0x70, 0x62, 0x73, 0x71, 0x6c, 0xa2, 0x02, 0x05, 0x53, 0x53, 0x53, - 0x53, 0x53, 0xaa, 0x02, 0x21, 0x53, 0x66, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x2e, 0x53, 0x71, 0x6c, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x21, 0x53, 0x66, 0x5c, 0x53, 0x75, 0x62, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x5c, 0x53, 0x69, 0x6e, 0x6b, 0x5c, 0x53, 0x71, 0x6c, 0x5c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x2d, 0x53, 0x66, 0x5c, - 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x5c, 0x53, 0x69, 0x6e, 0x6b, 0x5c, - 0x53, 0x71, 0x6c, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, - 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x26, 0x53, 0x66, 0x3a, - 0x3a, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x3a, 0x3a, 0x53, 0x69, 0x6e, - 0x6b, 0x3a, 0x3a, 0x53, 0x71, 0x6c, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, - 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc = "" + + "\n" + + "1sf/substreams/sink/sql/services/v1/services.proto\x12!sf.substreams.sink.sql.service.v1\x1a\x1bsf/substreams/options.proto\"\xa8\x04\n" + + "\aService\x12\x1e\n" + + "\x06schema\x18\x01 \x01(\tB\x06\u0089\x01\x02\b\x01R\x06schema\x12P\n" + + "\n" + + "dbt_config\x18\x02 \x01(\v2,.sf.substreams.sink.sql.service.v1.DBTConfigH\x00R\tdbtConfig\x88\x01\x01\x12Z\n" + + "\x0fhasura_frontend\x18\x04 
\x01(\v21.sf.substreams.sink.sql.service.v1.HasuraFrontendR\x0ehasuraFrontend\x12l\n" + + "\x15postgraphile_frontend\x18\x05 \x01(\v27.sf.substreams.sink.sql.service.v1.PostgraphileFrontendR\x14postgraphileFrontend\x12I\n" + + "\x06engine\x18\a \x01(\x0e21.sf.substreams.sink.sql.service.v1.Service.EngineR\x06engine\x12T\n" + + "\rrest_frontend\x18\b \x01(\v2/.sf.substreams.sink.sql.service.v1.RESTFrontendR\frestFrontend\"1\n" + + "\x06Engine\x12\t\n" + + "\x05unset\x10\x00\x12\f\n" + + "\bpostgres\x10\x01\x12\x0e\n" + + "\n" + + "clickhouse\x10\x02B\r\n" + + "\v_dbt_config\"u\n" + + "\tDBTConfig\x12\x1c\n" + + "\x05files\x18\x01 \x01(\fB\x06\u0089\x01\x02\x10\x01R\x05files\x120\n" + + "\x14run_interval_seconds\x18\x02 \x01(\x05R\x12runIntervalSeconds\x12\x18\n" + + "\aenabled\x18\x03 \x01(\bR\aenabled\"*\n" + + "\x0eHasuraFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\"0\n" + + "\x14PostgraphileFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\")\n" + + "\rPGWebFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\"(\n" + + "\fRESTFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabledB\x98\x02\n" + + "%com.sf.substreams.sink.sql.service.v1B\rServicesProtoP\x01Z5github.com/streamingfast/substreams-sink-sql/pb;pbsql\xa2\x02\x05SSSSS\xaa\x02!Sf.Substreams.Sink.Sql.Service.V1\xca\x02!Sf\\Substreams\\Sink\\Sql\\Service\\V1\xe2\x02-Sf\\Substreams\\Sink\\Sql\\Service\\V1\\GPBMetadata\xea\x02&Sf::Substreams::Sink::Sql::Service::V1b\x06proto3" var ( file_sf_substreams_sink_sql_services_v1_services_proto_rawDescOnce sync.Once - file_sf_substreams_sink_sql_services_v1_services_proto_rawDescData = file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc + file_sf_substreams_sink_sql_services_v1_services_proto_rawDescData []byte ) func file_sf_substreams_sink_sql_services_v1_services_proto_rawDescGZIP() []byte { file_sf_substreams_sink_sql_services_v1_services_proto_rawDescOnce.Do(func() { - 
file_sf_substreams_sink_sql_services_v1_services_proto_rawDescData = protoimpl.X.CompressGZIP(file_sf_substreams_sink_sql_services_v1_services_proto_rawDescData) + file_sf_substreams_sink_sql_services_v1_services_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc), len(file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc))) }) return file_sf_substreams_sink_sql_services_v1_services_proto_rawDescData } var file_sf_substreams_sink_sql_services_v1_services_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_sf_substreams_sink_sql_services_v1_services_proto_goTypes = []interface{}{ +var file_sf_substreams_sink_sql_services_v1_services_proto_goTypes = []any{ (Service_Engine)(0), // 0: sf.substreams.sink.sql.service.v1.Service.Engine (*Service)(nil), // 1: sf.substreams.sink.sql.service.v1.Service (*DBTConfig)(nil), // 2: sf.substreams.sink.sql.service.v1.DBTConfig @@ -539,86 +471,12 @@ func file_sf_substreams_sink_sql_services_v1_services_proto_init() { if File_sf_substreams_sink_sql_services_v1_services_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DBTConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HasuraFrontend); i 
{ - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PostgraphileFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PGWebFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RESTFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc), len(file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc)), NumEnums: 1, NumMessages: 6, NumExtensions: 0, @@ -630,7 +488,6 @@ func file_sf_substreams_sink_sql_services_v1_services_proto_init() { MessageInfos: file_sf_substreams_sink_sql_services_v1_services_proto_msgTypes, }.Build() File_sf_substreams_sink_sql_services_v1_services_proto = out.File - file_sf_substreams_sink_sql_services_v1_services_proto_rawDesc = nil 
file_sf_substreams_sink_sql_services_v1_services_proto_goTypes = nil file_sf_substreams_sink_sql_services_v1_services_proto_depIdxs = nil } diff --git a/pb/sf/substreams/sink/sql/v1/deprecated.pb.go b/pb/sf/substreams/sink/sql/v1/deprecated.pb.go index bcaae05..5899377 100644 --- a/pb/sf/substreams/sink/sql/v1/deprecated.pb.go +++ b/pb/sf/substreams/sink/sql/v1/deprecated.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.36.6 // protoc (unknown) // source: sf/substreams/sink/sql/v1/deprecated.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -71,10 +72,7 @@ func (Service_Engine) EnumDescriptor() ([]byte, []int) { } type Service struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Containing both create table statements and index creation statements. 
Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` DbtConfig *DBTConfig `protobuf:"bytes,2,opt,name=dbt_config,json=dbtConfig,proto3,oneof" json:"dbt_config,omitempty"` @@ -82,15 +80,15 @@ type Service struct { PostgraphileFrontend *PostgraphileFrontend `protobuf:"bytes,5,opt,name=postgraphile_frontend,json=postgraphileFrontend,proto3" json:"postgraphile_frontend,omitempty"` Engine Service_Engine `protobuf:"varint,7,opt,name=engine,proto3,enum=sf.substreams.sink.sql.v1.Service_Engine" json:"engine,omitempty"` RestFrontend *RESTFrontend `protobuf:"bytes,8,opt,name=rest_frontend,json=restFrontend,proto3" json:"rest_frontend,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Service) Reset() { *x = Service{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Service) String() string { @@ -101,7 +99,7 @@ func (*Service) ProtoMessage() {} func (x *Service) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -160,22 +158,19 @@ func (x *Service) GetRestFrontend() *RESTFrontend { // https://www.getdbt.com/product/what-is-dbt type DBTConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Files []byte `protobuf:"bytes,1,opt,name=files,proto3" json:"files,omitempty"` - RunIntervalSeconds int32 `protobuf:"varint,2,opt,name=run_interval_seconds,json=runIntervalSeconds,proto3" json:"run_interval_seconds,omitempty"` - 
Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Files []byte `protobuf:"bytes,1,opt,name=files,proto3" json:"files,omitempty"` + RunIntervalSeconds int32 `protobuf:"varint,2,opt,name=run_interval_seconds,json=runIntervalSeconds,proto3" json:"run_interval_seconds,omitempty"` + Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DBTConfig) Reset() { *x = DBTConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DBTConfig) String() string { @@ -186,7 +181,7 @@ func (*DBTConfig) ProtoMessage() {} func (x *DBTConfig) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -224,20 +219,17 @@ func (x *DBTConfig) GetEnabled() bool { // https://hasura.io/docs/latest/index/ type HasuraFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HasuraFrontend) Reset() { *x = HasuraFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[2] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HasuraFrontend) String() string { @@ -248,7 +240,7 @@ func (*HasuraFrontend) ProtoMessage() {} func (x *HasuraFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -272,20 +264,17 @@ func (x *HasuraFrontend) GetEnabled() bool { // https://www.graphile.org/postgraphile/ type PostgraphileFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PostgraphileFrontend) Reset() { *x = PostgraphileFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PostgraphileFrontend) String() string { @@ -296,7 +285,7 @@ func (*PostgraphileFrontend) ProtoMessage() {} func (x *PostgraphileFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -320,20 +309,17 @@ 
func (x *PostgraphileFrontend) GetEnabled() bool { // https://github.com/sosedoff/pgweb type PGWebFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PGWebFrontend) Reset() { *x = PGWebFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PGWebFrontend) String() string { @@ -344,7 +330,7 @@ func (*PGWebFrontend) ProtoMessage() {} func (x *PGWebFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -368,20 +354,17 @@ func (x *PGWebFrontend) GetEnabled() bool { // https://github.com/semiotic-ai/sql-wrapper type RESTFrontend struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RESTFrontend) Reset() { *x = RESTFrontend{} - if protoimpl.UnsafeEnabled { - mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RESTFrontend) String() string { @@ -392,7 +375,7 @@ func (*RESTFrontend) ProtoMessage() {} func (x *RESTFrontend) ProtoReflect() protoreflect.Message { mi := &file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -416,97 +399,52 @@ func (x *RESTFrontend) GetEnabled() bool { var File_sf_substreams_sink_sql_v1_deprecated_proto protoreflect.FileDescriptor -var file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, - 0x73, 0x69, 0x6e, 0x6b, 0x2f, 0x73, 0x71, 0x6c, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x73, 0x66, - 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, - 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x04, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x1e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x06, 0xc2, 0x89, 0x01, 0x02, 0x08, 0x01, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x12, 0x48, 0x0a, 0x0a, 0x64, 0x62, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x76, 0x31, - 0x2e, 0x44, 0x42, 0x54, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x64, 0x62, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x88, 0x01, 0x01, 0x12, 0x52, 0x0a, 0x0f, 0x68, 0x61, - 0x73, 0x75, 0x72, 0x61, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x76, 0x31, 0x2e, - 0x48, 0x61, 0x73, 0x75, 0x72, 0x61, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x0e, - 0x68, 0x61, 0x73, 0x75, 0x72, 0x61, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x64, - 0x0a, 0x15, 0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x6c, 0x65, 0x5f, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, - 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x67, 0x72, - 0x61, 0x70, 0x68, 0x69, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x14, - 0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x12, 0x41, 0x0a, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, - 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x5f, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, - 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x76, 0x31, 
0x2e, 0x52, 0x45, 0x53, 0x54, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x22, 0x31, 0x0a, 0x06, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, - 0x09, 0x0a, 0x05, 0x75, 0x6e, 0x73, 0x65, 0x74, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x70, 0x6f, - 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x63, 0x6c, 0x69, 0x63, - 0x6b, 0x68, 0x6f, 0x75, 0x73, 0x65, 0x10, 0x02, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x62, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x75, 0x0a, 0x09, 0x44, 0x42, 0x54, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1c, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x42, 0x06, 0xc2, 0x89, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x66, 0x69, 0x6c, - 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x12, 0x72, 0x75, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x2a, - 0x0a, 0x0e, 0x48, 0x61, 0x73, 0x75, 0x72, 0x61, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x30, 0x0a, 0x14, 0x50, 0x6f, - 0x73, 0x74, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0d, - 0x50, 0x47, 0x57, 0x65, 0x62, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, - 
0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x28, 0x0a, 0x0c, 0x52, 0x45, 0x53, 0x54, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x42, 0xf0, 0x01, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x66, 0x2e, 0x73, 0x75, 0x62, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x73, 0x71, 0x6c, - 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, - 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2d, 0x73, 0x69, 0x6e, 0x6b, - 0x2d, 0x73, 0x71, 0x6c, 0x2f, 0x70, 0x62, 0x3b, 0x70, 0x62, 0x73, 0x71, 0x6c, 0xa2, 0x02, 0x04, - 0x53, 0x53, 0x53, 0x53, 0xaa, 0x02, 0x19, 0x53, 0x66, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x6b, 0x2e, 0x53, 0x71, 0x6c, 0x2e, 0x56, 0x31, - 0xca, 0x02, 0x19, 0x53, 0x66, 0x5c, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x5c, 0x53, 0x69, 0x6e, 0x6b, 0x5c, 0x53, 0x71, 0x6c, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x25, 0x53, - 0x66, 0x5c, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x5c, 0x53, 0x69, 0x6e, - 0x6b, 0x5c, 0x53, 0x71, 0x6c, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1d, 0x53, 0x66, 0x3a, 0x3a, 0x53, 0x75, 0x62, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x3a, 0x3a, 0x53, 0x69, 0x6e, 0x6b, 0x3a, 0x3a, 0x53, 0x71, 0x6c, - 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc 
= "" + + "\n" + + "*sf/substreams/sink/sql/v1/deprecated.proto\x12\x19sf.substreams.sink.sql.v1\x1a\x1bsf/substreams/options.proto\"\x80\x04\n" + + "\aService\x12\x1e\n" + + "\x06schema\x18\x01 \x01(\tB\x06\u0089\x01\x02\b\x01R\x06schema\x12H\n" + + "\n" + + "dbt_config\x18\x02 \x01(\v2$.sf.substreams.sink.sql.v1.DBTConfigH\x00R\tdbtConfig\x88\x01\x01\x12R\n" + + "\x0fhasura_frontend\x18\x04 \x01(\v2).sf.substreams.sink.sql.v1.HasuraFrontendR\x0ehasuraFrontend\x12d\n" + + "\x15postgraphile_frontend\x18\x05 \x01(\v2/.sf.substreams.sink.sql.v1.PostgraphileFrontendR\x14postgraphileFrontend\x12A\n" + + "\x06engine\x18\a \x01(\x0e2).sf.substreams.sink.sql.v1.Service.EngineR\x06engine\x12L\n" + + "\rrest_frontend\x18\b \x01(\v2'.sf.substreams.sink.sql.v1.RESTFrontendR\frestFrontend\"1\n" + + "\x06Engine\x12\t\n" + + "\x05unset\x10\x00\x12\f\n" + + "\bpostgres\x10\x01\x12\x0e\n" + + "\n" + + "clickhouse\x10\x02B\r\n" + + "\v_dbt_config\"u\n" + + "\tDBTConfig\x12\x1c\n" + + "\x05files\x18\x01 \x01(\fB\x06\u0089\x01\x02\x10\x01R\x05files\x120\n" + + "\x14run_interval_seconds\x18\x02 \x01(\x05R\x12runIntervalSeconds\x12\x18\n" + + "\aenabled\x18\x03 \x01(\bR\aenabled\"*\n" + + "\x0eHasuraFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\"0\n" + + "\x14PostgraphileFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\")\n" + + "\rPGWebFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\"(\n" + + "\fRESTFrontend\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabledB\xf0\x01\n" + + "\x1dcom.sf.substreams.sink.sql.v1B\x0fDeprecatedProtoP\x01Z5github.com/streamingfast/substreams-sink-sql/pb;pbsql\xa2\x02\x04SSSS\xaa\x02\x19Sf.Substreams.Sink.Sql.V1\xca\x02\x19Sf\\Substreams\\Sink\\Sql\\V1\xe2\x02%Sf\\Substreams\\Sink\\Sql\\V1\\GPBMetadata\xea\x02\x1dSf::Substreams::Sink::Sql::V1b\x06proto3" var ( file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescOnce sync.Once - file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescData = 
file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc + file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescData []byte ) func file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescGZIP() []byte { file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescOnce.Do(func() { - file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescData = protoimpl.X.CompressGZIP(file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescData) + file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc), len(file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc))) }) return file_sf_substreams_sink_sql_v1_deprecated_proto_rawDescData } var file_sf_substreams_sink_sql_v1_deprecated_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_sf_substreams_sink_sql_v1_deprecated_proto_goTypes = []interface{}{ +var file_sf_substreams_sink_sql_v1_deprecated_proto_goTypes = []any{ (Service_Engine)(0), // 0: sf.substreams.sink.sql.v1.Service.Engine (*Service)(nil), // 1: sf.substreams.sink.sql.v1.Service (*DBTConfig)(nil), // 2: sf.substreams.sink.sql.v1.DBTConfig @@ -533,86 +471,12 @@ func file_sf_substreams_sink_sql_v1_deprecated_proto_init() { if File_sf_substreams_sink_sql_v1_deprecated_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Service); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DBTConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HasuraFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PostgraphileFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PGWebFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RESTFrontend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc), len(file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc)), NumEnums: 1, NumMessages: 6, NumExtensions: 0, @@ -624,7 +488,6 @@ func file_sf_substreams_sink_sql_v1_deprecated_proto_init() { MessageInfos: file_sf_substreams_sink_sql_v1_deprecated_proto_msgTypes, }.Build() File_sf_substreams_sink_sql_v1_deprecated_proto = out.File - 
file_sf_substreams_sink_sql_v1_deprecated_proto_rawDesc = nil file_sf_substreams_sink_sql_v1_deprecated_proto_goTypes = nil file_sf_substreams_sink_sql_v1_deprecated_proto_depIdxs = nil } diff --git a/pb/test/relations/relations.pb.go b/pb/test/relations/relations.pb.go index c566508..0e86829 100644 --- a/pb/test/relations/relations.pb.go +++ b/pb/test/relations/relations.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.36.6 // protoc (unknown) // source: test/relations/relations.proto @@ -14,6 +14,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -24,20 +25,17 @@ const ( ) type Output struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"` unknownFields protoimpl.UnknownFields - - Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Output) Reset() { *x = Output{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Output) String() string { @@ -48,7 +46,7 @@ func (*Output) ProtoMessage() {} func (x *Output) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -71,26 +69,23 @@ func (x *Output) GetEntities() []*Entity { } type Entity struct { - state protoimpl.MessageState - 
sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Entity: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Entity: // // *Entity_TypesTest // *Entity_Customer // *Entity_Order // *Entity_Item - Entity isEntity_Entity `protobuf_oneof:"entity"` + Entity isEntity_Entity `protobuf_oneof:"entity"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Entity) Reset() { *x = Entity{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Entity) String() string { @@ -101,7 +96,7 @@ func (*Entity) ProtoMessage() {} func (x *Entity) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -116,37 +111,45 @@ func (*Entity) Descriptor() ([]byte, []int) { return file_test_relations_relations_proto_rawDescGZIP(), []int{1} } -func (m *Entity) GetEntity() isEntity_Entity { - if m != nil { - return m.Entity +func (x *Entity) GetEntity() isEntity_Entity { + if x != nil { + return x.Entity } return nil } func (x *Entity) GetTypesTest() *TypesTest { - if x, ok := x.GetEntity().(*Entity_TypesTest); ok { - return x.TypesTest + if x != nil { + if x, ok := x.Entity.(*Entity_TypesTest); ok { + return x.TypesTest + } } return nil } func (x *Entity) GetCustomer() *Customer { - if x, ok := x.GetEntity().(*Entity_Customer); ok { - return x.Customer + if x != nil { + if x, ok := x.Entity.(*Entity_Customer); ok { + return x.Customer + } } return nil } func (x *Entity) GetOrder() *Order { - if 
x, ok := x.GetEntity().(*Entity_Order); ok { - return x.Order + if x != nil { + if x, ok := x.Entity.(*Entity_Order); ok { + return x.Order + } } return nil } func (x *Entity) GetItem() *Item { - if x, ok := x.GetEntity().(*Entity_Item); ok { - return x.Item + if x != nil { + if x, ok := x.Entity.(*Entity_Item); ok { + return x.Item + } } return nil } @@ -180,11 +183,8 @@ func (*Entity_Order) isEntity_Entity() {} func (*Entity_Item) isEntity_Entity() {} type TypesTest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // Field for each protobuf native type DoubleField float64 `protobuf:"fixed64,2,opt,name=double_field,json=doubleField,proto3" json:"double_field,omitempty"` FloatField float32 `protobuf:"fixed32,3,opt,name=float_field,json=floatField,proto3" json:"float_field,omitempty"` @@ -202,15 +202,15 @@ type TypesTest struct { StringField string `protobuf:"bytes,15,opt,name=string_field,json=stringField,proto3" json:"string_field,omitempty"` BytesField []byte `protobuf:"bytes,16,opt,name=bytes_field,json=bytesField,proto3" json:"bytes_field,omitempty"` TimestampField *timestamppb.Timestamp `protobuf:"bytes,30,opt,name=timestamp_field,json=timestampField,proto3" json:"timestamp_field,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TypesTest) Reset() { *x = TypesTest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TypesTest) String() string { @@ -221,7 +221,7 @@ func (*TypesTest) 
ProtoMessage() {} func (x *TypesTest) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -356,21 +356,18 @@ func (x *TypesTest) GetTimestampField() *timestamppb.Timestamp { } type Customer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CustomerId string `protobuf:"bytes,1,opt,name=customer_id,json=customerId,proto3" json:"customer_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` unknownFields protoimpl.UnknownFields - - CustomerId string `protobuf:"bytes,1,opt,name=customer_id,json=customerId,proto3" json:"customer_id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Customer) Reset() { *x = Customer{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Customer) String() string { @@ -381,7 +378,7 @@ func (*Customer) ProtoMessage() {} func (x *Customer) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -411,22 +408,19 @@ func (x *Customer) GetName() string { } type Order struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + OrderId string `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" 
json:"order_id,omitempty"` + CustomerRefId string `protobuf:"bytes,2,opt,name=customer_ref_id,json=customerRefId,proto3" json:"customer_ref_id,omitempty"` + Items []*OrderItem `protobuf:"bytes,3,rep,name=items,proto3" json:"items,omitempty"` unknownFields protoimpl.UnknownFields - - OrderId string `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"order_id,omitempty"` - CustomerRefId string `protobuf:"bytes,2,opt,name=customer_ref_id,json=customerRefId,proto3" json:"customer_ref_id,omitempty"` - Items []*OrderItem `protobuf:"bytes,3,rep,name=items,proto3" json:"items,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Order) Reset() { *x = Order{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Order) String() string { @@ -437,7 +431,7 @@ func (*Order) ProtoMessage() {} func (x *Order) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -474,22 +468,19 @@ func (x *Order) GetItems() []*OrderItem { } type OrderItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // can also leverage orders._id using "order on order_id" if order do not have a external unique identifier - ItemId string `protobuf:"bytes,2,opt,name=item_id,json=itemId,proto3" json:"item_id,omitempty"` - Quantity int64 `protobuf:"varint,11,opt,name=quantity,proto3" json:"quantity,omitempty"` + ItemId string `protobuf:"bytes,2,opt,name=item_id,json=itemId,proto3" json:"item_id,omitempty"` + 
Quantity int64 `protobuf:"varint,11,opt,name=quantity,proto3" json:"quantity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OrderItem) Reset() { *x = OrderItem{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OrderItem) String() string { @@ -500,7 +491,7 @@ func (*OrderItem) ProtoMessage() {} func (x *OrderItem) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -530,22 +521,19 @@ func (x *OrderItem) GetQuantity() int64 { } type Item struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ItemId string `protobuf:"bytes,1,opt,name=item_id,json=itemId,proto3" json:"item_id,omitempty"` + Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` + Price float64 `protobuf:"fixed64,11,opt,name=price,proto3" json:"price,omitempty"` unknownFields protoimpl.UnknownFields - - ItemId string `protobuf:"bytes,1,opt,name=item_id,json=itemId,proto3" json:"item_id,omitempty"` - Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` - Price float64 `protobuf:"fixed64,11,opt,name=price,proto3" json:"price,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Item) Reset() { *x = Item{} - if protoimpl.UnsafeEnabled { - mi := &file_test_relations_relations_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_relations_relations_proto_msgTypes[6] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Item) String() string { @@ -556,7 +544,7 @@ func (*Item) ProtoMessage() {} func (x *Item) ProtoReflect() protoreflect.Message { mi := &file_test_relations_relations_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -594,133 +582,80 @@ func (x *Item) GetPrice() float64 { var File_test_relations_relations_proto protoreflect.FileDescriptor -var file_test_relations_relations_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0e, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x73, 0x66, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x2f, 0x73, 0x69, 0x6e, 0x6b, 0x2f, 0x73, 0x71, 0x6c, 0x2f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x3c, 0x0a, 0x06, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x32, 0x0a, 0x08, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, - 
0x22, 0xe1, 0x01, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0a, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x54, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x54, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x65, 0x73, 0x74, - 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x12, - 0x2d, 0x0a, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x2a, - 0x0a, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x74, - 0x65, 0x6d, 0x48, 0x00, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x42, 0x08, 0x0a, 0x06, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x22, 0x88, 0x05, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x73, 0x54, 0x65, - 0x73, 0x74, 0x12, 0x16, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x06, - 0xb2, 0xf8, 0x25, 0x02, 0x20, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, - 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1f, 0x0a, - 0x0b, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x0a, 0x66, 0x6c, 
0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, - 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, - 0x34, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x69, 0x6e, 0x74, 0x33, 0x32, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x11, 0x52, 0x0b, 0x73, 0x69, - 0x6e, 0x74, 0x33, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x69, 0x6e, - 0x74, 0x36, 0x34, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x12, 0x52, - 0x0b, 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x07, 0x52, 0x0c, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, - 0x34, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, - 0x33, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x0d, - 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x46, 0x69, 0x65, 0x6c, 
0x64, 0x12, 0x25, 0x0a, - 0x0e, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x10, 0x52, 0x0d, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x3a, 0x11, 0xaa, 0xf8, - 0x25, 0x0d, 0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x73, 0x22, - 0x58, 0x0a, 0x08, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0b, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x06, 0xb2, 0xf8, 0x25, 0x02, 0x20, 0x01, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x0f, 0xaa, 0xf8, 0x25, 0x0b, 0x0a, 0x09, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x05, 0x4f, 0x72, - 0x64, 0x65, 
0x72, 0x12, 0x21, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xb2, 0xf8, 0x25, 0x02, 0x20, 0x01, 0x52, 0x07, 0x6f, - 0x72, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x65, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x1e, 0xb2, 0xf8, 0x25, 0x1a, 0x12, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, - 0x20, 0x6f, 0x6e, 0x20, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x52, - 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x66, 0x49, 0x64, 0x12, 0x2f, - 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x3a, - 0x0c, 0xaa, 0xf8, 0x25, 0x08, 0x0a, 0x06, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x22, 0x7f, 0x0a, - 0x09, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2f, 0x0a, 0x07, 0x69, 0x74, - 0x65, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x16, 0xb2, 0xf8, 0x25, - 0x12, 0x12, 0x10, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x20, 0x6f, 0x6e, 0x20, 0x69, 0x74, 0x65, 0x6d, - 0x5f, 0x69, 0x64, 0x52, 0x06, 0x69, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x71, - 0x75, 0x61, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x71, - 0x75, 0x61, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x3a, 0x25, 0xaa, 0xf8, 0x25, 0x21, 0x0a, 0x0b, 0x6f, - 0x72, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x12, 0x6f, 0x72, 0x64, 0x65, - 0x72, 0x73, 0x20, 0x6f, 0x6e, 0x20, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x22, 0x5e, - 0x0a, 0x04, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x1f, 0x0a, 0x07, 0x69, 0x74, 0x65, 0x6d, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 
0x06, 0xb2, 0xf8, 0x25, 0x02, 0x18, 0x01, 0x52, - 0x06, 0x69, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, - 0x72, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x70, 0x72, 0x69, 0x63, - 0x65, 0x3a, 0x0b, 0xaa, 0xf8, 0x25, 0x07, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x42, 0x8d, - 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x72, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0e, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0e, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x65, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x54, 0x52, 0x58, 0xaa, 0x02, 0x0e, - 0x54, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xca, 0x02, - 0x0e, 0x54, 0x65, 0x73, 0x74, 0x5c, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xe2, - 0x02, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x5c, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x54, - 0x65, 0x73, 0x74, 0x3a, 0x3a, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_test_relations_relations_proto_rawDesc = "" + + "\n" + + "\x1etest/relations/relations.proto\x12\x0etest.relations\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a-sf/substreams/sink/sql/schema/v1/schema.proto\"<\n" + + "\x06Output\x122\n" + + "\bentities\x18\x01 \x03(\v2\x16.test.relations.EntityR\bentities\"\xe1\x01\n" + + "\x06Entity\x12:\n" + + "\n" + + "types_test\x18\x01 \x01(\v2\x19.test.relations.TypesTestH\x00R\ttypesTest\x126\n" + + "\bcustomer\x18\n" + + " \x01(\v2\x18.test.relations.CustomerH\x00R\bcustomer\x12-\n" + + "\x05order\x18\v 
\x01(\v2\x15.test.relations.OrderH\x00R\x05order\x12*\n" + + "\x04item\x18\f \x01(\v2\x14.test.relations.ItemH\x00R\x04itemB\b\n" + + "\x06entity\"\x88\x05\n" + + "\tTypesTest\x12\x16\n" + + "\x02id\x18\x01 \x01(\x04B\x06\xb2\xf8%\x02 \x01R\x02id\x12!\n" + + "\fdouble_field\x18\x02 \x01(\x01R\vdoubleField\x12\x1f\n" + + "\vfloat_field\x18\x03 \x01(\x02R\n" + + "floatField\x12\x1f\n" + + "\vint32_field\x18\x04 \x01(\x05R\n" + + "int32Field\x12\x1f\n" + + "\vint64_field\x18\x05 \x01(\x03R\n" + + "int64Field\x12!\n" + + "\fuint32_field\x18\x06 \x01(\rR\vuint32Field\x12!\n" + + "\fuint64_field\x18\a \x01(\x04R\vuint64Field\x12!\n" + + "\fsint32_field\x18\b \x01(\x11R\vsint32Field\x12!\n" + + "\fsint64_field\x18\t \x01(\x12R\vsint64Field\x12#\n" + + "\rfixed32_field\x18\n" + + " \x01(\aR\ffixed32Field\x12#\n" + + "\rfixed64_field\x18\v \x01(\x06R\ffixed64Field\x12%\n" + + "\x0esfixed32_field\x18\f \x01(\x0fR\rsfixed32Field\x12%\n" + + "\x0esfixed64_field\x18\r \x01(\x10R\rsfixed64Field\x12\x1d\n" + + "\n" + + "bool_field\x18\x0e \x01(\bR\tboolField\x12!\n" + + "\fstring_field\x18\x0f \x01(\tR\vstringField\x12\x1f\n" + + "\vbytes_field\x18\x10 \x01(\fR\n" + + "bytesField\x12C\n" + + "\x0ftimestamp_field\x18\x1e \x01(\v2\x1a.google.protobuf.TimestampR\x0etimestampField:\x11\xaa\xf8%\r\n" + + "\vtypes_tests\"X\n" + + "\bCustomer\x12'\n" + + "\vcustomer_id\x18\x01 \x01(\tB\x06\xb2\xf8%\x02 \x01R\n" + + "customerId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name:\x0f\xaa\xf8%\v\n" + + "\tcustomers\"\xb1\x01\n" + + "\x05Order\x12!\n" + + "\border_id\x18\x01 \x01(\tB\x06\xb2\xf8%\x02 \x01R\aorderId\x12F\n" + + "\x0fcustomer_ref_id\x18\x02 \x01(\tB\x1e\xb2\xf8%\x1a\x12\x18customers on customer_idR\rcustomerRefId\x12/\n" + + "\x05items\x18\x03 \x03(\v2\x19.test.relations.OrderItemR\x05items:\f\xaa\xf8%\b\n" + + "\x06orders\"\x7f\n" + + "\tOrderItem\x12/\n" + + "\aitem_id\x18\x02 \x01(\tB\x16\xb2\xf8%\x12\x12\x10items on item_idR\x06itemId\x12\x1a\n" + + "\bquantity\x18\v 
\x01(\x03R\bquantity:%\xaa\xf8%!\n" + + "\vorder_items\x12\x12orders on order_id\"^\n" + + "\x04Item\x12\x1f\n" + + "\aitem_id\x18\x01 \x01(\tB\x06\xb2\xf8%\x02\x18\x01R\x06itemId\x12\x12\n" + + "\x04name\x18\n" + + " \x01(\tR\x04name\x12\x14\n" + + "\x05price\x18\v \x01(\x01R\x05price:\v\xaa\xf8%\a\n" + + "\x05itemsB\x8d\x01\n" + + "\x12com.test.relationsB\x0eRelationsProtoP\x01Z\x0etest/relations\xa2\x02\x03TRX\xaa\x02\x0eTest.Relations\xca\x02\x0eTest\\Relations\xe2\x02\x1aTest\\Relations\\GPBMetadata\xea\x02\x0fTest::Relationsb\x06proto3" var ( file_test_relations_relations_proto_rawDescOnce sync.Once - file_test_relations_relations_proto_rawDescData = file_test_relations_relations_proto_rawDesc + file_test_relations_relations_proto_rawDescData []byte ) func file_test_relations_relations_proto_rawDescGZIP() []byte { file_test_relations_relations_proto_rawDescOnce.Do(func() { - file_test_relations_relations_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_relations_relations_proto_rawDescData) + file_test_relations_relations_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_test_relations_relations_proto_rawDesc), len(file_test_relations_relations_proto_rawDesc))) }) return file_test_relations_relations_proto_rawDescData } var file_test_relations_relations_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_test_relations_relations_proto_goTypes = []interface{}{ +var file_test_relations_relations_proto_goTypes = []any{ (*Output)(nil), // 0: test.relations.Output (*Entity)(nil), // 1: test.relations.Entity (*TypesTest)(nil), // 2: test.relations.TypesTest @@ -750,93 +685,7 @@ func file_test_relations_relations_proto_init() { if File_test_relations_relations_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_test_relations_relations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Output); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_test_relations_relations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Entity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_relations_relations_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TypesTest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_relations_relations_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Customer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_relations_relations_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Order); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_relations_relations_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OrderItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_relations_relations_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Item); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_test_relations_relations_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_test_relations_relations_proto_msgTypes[1].OneofWrappers = []any{ (*Entity_TypesTest)(nil), (*Entity_Customer)(nil), (*Entity_Order)(nil), @@ -846,7 +695,7 @@ func file_test_relations_relations_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_test_relations_relations_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_test_relations_relations_proto_rawDesc), len(file_test_relations_relations_proto_rawDesc)), NumEnums: 0, NumMessages: 7, NumExtensions: 0, @@ -857,7 +706,6 @@ func file_test_relations_relations_proto_init() { MessageInfos: file_test_relations_relations_proto_msgTypes, }.Build() File_test_relations_relations_proto = out.File - file_test_relations_relations_proto_rawDesc = nil file_test_relations_relations_proto_goTypes = nil file_test_relations_relations_proto_depIdxs = nil } diff --git a/pb/test/semantic_types/example.pb.go b/pb/test/semantic_types/example.pb.go new file mode 100644 index 0000000..ffa9dac --- /dev/null +++ b/pb/test/semantic_types/example.pb.go @@ -0,0 +1,765 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc (unknown) +// source: test/semantic_types/example.proto + +package semantic_types + +import ( + _ "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Example demonstrating semantic type annotations for RisingWave rw_int256 support +type Output struct { + state protoimpl.MessageState `protogen:"open.v1"` + Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Output) Reset() { + *x = Output{} + mi := &file_test_semantic_types_example_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Output) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Output) ProtoMessage() {} + +func (x *Output) ProtoReflect() protoreflect.Message { + mi := &file_test_semantic_types_example_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Output.ProtoReflect.Descriptor instead. 
+func (*Output) Descriptor() ([]byte, []int) { + return file_test_semantic_types_example_proto_rawDescGZIP(), []int{0} +} + +func (x *Output) GetEntities() []*Entity { + if x != nil { + return x.Entities + } + return nil +} + +type Entity struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Entity: + // + // *Entity_EthTransaction + // *Entity_Erc20Transfer + // *Entity_DefiPosition + // *Entity_MetricsData + Entity isEntity_Entity `protobuf_oneof:"entity"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Entity) Reset() { + *x = Entity{} + mi := &file_test_semantic_types_example_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Entity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entity) ProtoMessage() {} + +func (x *Entity) ProtoReflect() protoreflect.Message { + mi := &file_test_semantic_types_example_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entity.ProtoReflect.Descriptor instead. 
+func (*Entity) Descriptor() ([]byte, []int) { + return file_test_semantic_types_example_proto_rawDescGZIP(), []int{1} +} + +func (x *Entity) GetEntity() isEntity_Entity { + if x != nil { + return x.Entity + } + return nil +} + +func (x *Entity) GetEthTransaction() *EthereumTransaction { + if x != nil { + if x, ok := x.Entity.(*Entity_EthTransaction); ok { + return x.EthTransaction + } + } + return nil +} + +func (x *Entity) GetErc20Transfer() *ERC20Transfer { + if x != nil { + if x, ok := x.Entity.(*Entity_Erc20Transfer); ok { + return x.Erc20Transfer + } + } + return nil +} + +func (x *Entity) GetDefiPosition() *DeFiPosition { + if x != nil { + if x, ok := x.Entity.(*Entity_DefiPosition); ok { + return x.DefiPosition + } + } + return nil +} + +func (x *Entity) GetMetricsData() *MetricsData { + if x != nil { + if x, ok := x.Entity.(*Entity_MetricsData); ok { + return x.MetricsData + } + } + return nil +} + +type isEntity_Entity interface { + isEntity_Entity() +} + +type Entity_EthTransaction struct { + EthTransaction *EthereumTransaction `protobuf:"bytes,1,opt,name=eth_transaction,json=ethTransaction,proto3,oneof"` +} + +type Entity_Erc20Transfer struct { + Erc20Transfer *ERC20Transfer `protobuf:"bytes,2,opt,name=erc20_transfer,json=erc20Transfer,proto3,oneof"` +} + +type Entity_DefiPosition struct { + DefiPosition *DeFiPosition `protobuf:"bytes,3,opt,name=defi_position,json=defiPosition,proto3,oneof"` +} + +type Entity_MetricsData struct { + MetricsData *MetricsData `protobuf:"bytes,4,opt,name=metrics_data,json=metricsData,proto3,oneof"` +} + +func (*Entity_EthTransaction) isEntity_Entity() {} + +func (*Entity_Erc20Transfer) isEntity_Entity() {} + +func (*Entity_DefiPosition) isEntity_Entity() {} + +func (*Entity_MetricsData) isEntity_Entity() {} + +// Ethereum transaction with semantic types optimized for RisingWave +type EthereumTransaction struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Transaction hash - optimized storage for 32-byte hashes 
+ Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + // Block hash + BlockHash string `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + // Transaction value in wei - uses RisingWave's rw_int256 for large numbers + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + // Gas price in wei - also uses rw_int256 + GasPrice string `protobuf:"bytes,4,opt,name=gas_price,json=gasPrice,proto3" json:"gas_price,omitempty"` + // From address - optimized VARCHAR(42) storage + FromAddress string `protobuf:"bytes,5,opt,name=from_address,json=fromAddress,proto3" json:"from_address,omitempty"` + // To address + ToAddress string `protobuf:"bytes,6,opt,name=to_address,json=toAddress,proto3" json:"to_address,omitempty"` + // Input data as hex + InputData string `protobuf:"bytes,7,opt,name=input_data,json=inputData,proto3" json:"input_data,omitempty"` + // Block timestamp + BlockTimestamp int64 `protobuf:"varint,8,opt,name=block_timestamp,json=blockTimestamp,proto3" json:"block_timestamp,omitempty"` + // Gas used (regular integer, not 256-bit) + GasUsed int64 `protobuf:"varint,9,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + // Transaction index in block + TransactionIndex int32 `protobuf:"varint,10,opt,name=transaction_index,json=transactionIndex,proto3" json:"transaction_index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EthereumTransaction) Reset() { + *x = EthereumTransaction{} + mi := &file_test_semantic_types_example_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EthereumTransaction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EthereumTransaction) ProtoMessage() {} + +func (x *EthereumTransaction) ProtoReflect() protoreflect.Message { + mi := &file_test_semantic_types_example_proto_msgTypes[2] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EthereumTransaction.ProtoReflect.Descriptor instead. +func (*EthereumTransaction) Descriptor() ([]byte, []int) { + return file_test_semantic_types_example_proto_rawDescGZIP(), []int{2} +} + +func (x *EthereumTransaction) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + +func (x *EthereumTransaction) GetBlockHash() string { + if x != nil { + return x.BlockHash + } + return "" +} + +func (x *EthereumTransaction) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *EthereumTransaction) GetGasPrice() string { + if x != nil { + return x.GasPrice + } + return "" +} + +func (x *EthereumTransaction) GetFromAddress() string { + if x != nil { + return x.FromAddress + } + return "" +} + +func (x *EthereumTransaction) GetToAddress() string { + if x != nil { + return x.ToAddress + } + return "" +} + +func (x *EthereumTransaction) GetInputData() string { + if x != nil { + return x.InputData + } + return "" +} + +func (x *EthereumTransaction) GetBlockTimestamp() int64 { + if x != nil { + return x.BlockTimestamp + } + return 0 +} + +func (x *EthereumTransaction) GetGasUsed() int64 { + if x != nil { + return x.GasUsed + } + return 0 +} + +func (x *EthereumTransaction) GetTransactionIndex() int32 { + if x != nil { + return x.TransactionIndex + } + return 0 +} + +// ERC20 transfer events with precise decimal handling +type ERC20Transfer struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Contract address + ContractAddress string `protobuf:"bytes,1,opt,name=contract_address,json=contractAddress,proto3" json:"contract_address,omitempty"` + // From address + FromAddress string `protobuf:"bytes,2,opt,name=from_address,json=fromAddress,proto3" json:"from_address,omitempty"` + // To address + ToAddress string 
`protobuf:"bytes,3,opt,name=to_address,json=toAddress,proto3" json:"to_address,omitempty"` + // Transfer amount - 18 decimal places for most ERC20 tokens + Amount string `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` + // USDC amount example - 6 decimal places + UsdcAmount string `protobuf:"bytes,5,opt,name=usdc_amount,json=usdcAmount,proto3" json:"usdc_amount,omitempty"` + // Log index in transaction + LogIndex int32 `protobuf:"varint,6,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ERC20Transfer) Reset() { + *x = ERC20Transfer{} + mi := &file_test_semantic_types_example_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ERC20Transfer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ERC20Transfer) ProtoMessage() {} + +func (x *ERC20Transfer) ProtoReflect() protoreflect.Message { + mi := &file_test_semantic_types_example_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ERC20Transfer.ProtoReflect.Descriptor instead. 
+func (*ERC20Transfer) Descriptor() ([]byte, []int) { + return file_test_semantic_types_example_proto_rawDescGZIP(), []int{3} +} + +func (x *ERC20Transfer) GetContractAddress() string { + if x != nil { + return x.ContractAddress + } + return "" +} + +func (x *ERC20Transfer) GetFromAddress() string { + if x != nil { + return x.FromAddress + } + return "" +} + +func (x *ERC20Transfer) GetToAddress() string { + if x != nil { + return x.ToAddress + } + return "" +} + +func (x *ERC20Transfer) GetAmount() string { + if x != nil { + return x.Amount + } + return "" +} + +func (x *ERC20Transfer) GetUsdcAmount() string { + if x != nil { + return x.UsdcAmount + } + return "" +} + +func (x *ERC20Transfer) GetLogIndex() int32 { + if x != nil { + return x.LogIndex + } + return 0 +} + +// DeFi position with various numeric types +type DeFiPosition struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Position ID as UUID + PositionId string `protobuf:"bytes,1,opt,name=position_id,json=positionId,proto3" json:"position_id,omitempty"` + // User address + UserAddress string `protobuf:"bytes,2,opt,name=user_address,json=userAddress,proto3" json:"user_address,omitempty"` + // Protocol address + ProtocolAddress string `protobuf:"bytes,3,opt,name=protocol_address,json=protocolAddress,proto3" json:"protocol_address,omitempty"` + // Collateral amount (18 decimals) + CollateralAmount string `protobuf:"bytes,4,opt,name=collateral_amount,json=collateralAmount,proto3" json:"collateral_amount,omitempty"` + // Debt amount (18 decimals) + DebtAmount string `protobuf:"bytes,5,opt,name=debt_amount,json=debtAmount,proto3" json:"debt_amount,omitempty"` + // Liquidation threshold (percentage with 18 decimals) + LiquidationThreshold string `protobuf:"bytes,6,opt,name=liquidation_threshold,json=liquidationThreshold,proto3" json:"liquidation_threshold,omitempty"` + // Position metadata as JSON + Metadata string `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // 
Created timestamp + CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Updated timestamp (milliseconds) + UpdatedAtMs int64 `protobuf:"varint,9,opt,name=updated_at_ms,json=updatedAtMs,proto3" json:"updated_at_ms,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeFiPosition) Reset() { + *x = DeFiPosition{} + mi := &file_test_semantic_types_example_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeFiPosition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeFiPosition) ProtoMessage() {} + +func (x *DeFiPosition) ProtoReflect() protoreflect.Message { + mi := &file_test_semantic_types_example_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeFiPosition.ProtoReflect.Descriptor instead. 
+func (*DeFiPosition) Descriptor() ([]byte, []int) { + return file_test_semantic_types_example_proto_rawDescGZIP(), []int{4} +} + +func (x *DeFiPosition) GetPositionId() string { + if x != nil { + return x.PositionId + } + return "" +} + +func (x *DeFiPosition) GetUserAddress() string { + if x != nil { + return x.UserAddress + } + return "" +} + +func (x *DeFiPosition) GetProtocolAddress() string { + if x != nil { + return x.ProtocolAddress + } + return "" +} + +func (x *DeFiPosition) GetCollateralAmount() string { + if x != nil { + return x.CollateralAmount + } + return "" +} + +func (x *DeFiPosition) GetDebtAmount() string { + if x != nil { + return x.DebtAmount + } + return "" +} + +func (x *DeFiPosition) GetLiquidationThreshold() string { + if x != nil { + return x.LiquidationThreshold + } + return "" +} + +func (x *DeFiPosition) GetMetadata() string { + if x != nil { + return x.Metadata + } + return "" +} + +func (x *DeFiPosition) GetCreatedAt() int64 { + if x != nil { + return x.CreatedAt + } + return 0 +} + +func (x *DeFiPosition) GetUpdatedAtMs() int64 { + if x != nil { + return x.UpdatedAtMs + } + return 0 +} + +// Metrics and monitoring data +type MetricsData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Metric ID + MetricId string `protobuf:"bytes,1,opt,name=metric_id,json=metricId,proto3" json:"metric_id,omitempty"` + // Total value locked in protocol (money type for USD values) + TvlUsd string `protobuf:"bytes,2,opt,name=tvl_usd,json=tvlUsd,proto3" json:"tvl_usd,omitempty"` + // Bitcoin amount (8 decimal places) + BtcAmount string `protobuf:"bytes,3,opt,name=btc_amount,json=btcAmount,proto3" json:"btc_amount,omitempty"` + // Large integer metric (uses rw_int256) + LargeMetric string `protobuf:"bytes,4,opt,name=large_metric,json=largeMetric,proto3" json:"large_metric,omitempty"` + // Binary data + BinaryData string `protobuf:"bytes,5,opt,name=binary_data,json=binaryData,proto3" json:"binary_data,omitempty"` + // Signature data + 
Signature string `protobuf:"bytes,6,opt,name=signature,proto3" json:"signature,omitempty"` + // Public key + PublicKey string `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // Measurement timestamp + MeasuredAt int64 `protobuf:"varint,8,opt,name=measured_at,json=measuredAt,proto3" json:"measured_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MetricsData) Reset() { + *x = MetricsData{} + mi := &file_test_semantic_types_example_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MetricsData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricsData) ProtoMessage() {} + +func (x *MetricsData) ProtoReflect() protoreflect.Message { + mi := &file_test_semantic_types_example_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsData.ProtoReflect.Descriptor instead. 
+func (*MetricsData) Descriptor() ([]byte, []int) { + return file_test_semantic_types_example_proto_rawDescGZIP(), []int{5} +} + +func (x *MetricsData) GetMetricId() string { + if x != nil { + return x.MetricId + } + return "" +} + +func (x *MetricsData) GetTvlUsd() string { + if x != nil { + return x.TvlUsd + } + return "" +} + +func (x *MetricsData) GetBtcAmount() string { + if x != nil { + return x.BtcAmount + } + return "" +} + +func (x *MetricsData) GetLargeMetric() string { + if x != nil { + return x.LargeMetric + } + return "" +} + +func (x *MetricsData) GetBinaryData() string { + if x != nil { + return x.BinaryData + } + return "" +} + +func (x *MetricsData) GetSignature() string { + if x != nil { + return x.Signature + } + return "" +} + +func (x *MetricsData) GetPublicKey() string { + if x != nil { + return x.PublicKey + } + return "" +} + +func (x *MetricsData) GetMeasuredAt() int64 { + if x != nil { + return x.MeasuredAt + } + return 0 +} + +var File_test_semantic_types_example_proto protoreflect.FileDescriptor + +const file_test_semantic_types_example_proto_rawDesc = "" + + "\n" + + "!test/semantic_types/example.proto\x12\x13test.semantic_types\x1a\x1fgoogle/protobuf/timestamp.proto\x1a-sf/substreams/sink/sql/schema/v1/schema.proto\"A\n" + + "\x06Output\x127\n" + + "\bentities\x18\x01 \x03(\v2\x1b.test.semantic_types.EntityR\bentities\"\xc5\x02\n" + + "\x06Entity\x12S\n" + + "\x0feth_transaction\x18\x01 \x01(\v2(.test.semantic_types.EthereumTransactionH\x00R\x0eethTransaction\x12K\n" + + "\x0eerc20_transfer\x18\x02 \x01(\v2\".test.semantic_types.ERC20TransferH\x00R\rerc20Transfer\x12H\n" + + "\rdefi_position\x18\x03 \x01(\v2!.test.semantic_types.DeFiPositionH\x00R\fdefiPosition\x12E\n" + + "\fmetrics_data\x18\x04 \x01(\v2 .test.semantic_types.MetricsDataH\x00R\vmetricsDataB\b\n" + + "\x06entity\"\xf7\x03\n" + + "\x13EthereumTransaction\x12)\n" + + "\x04hash\x18\x01 \x01(\tB\x15\xb2\xf8%\x11\n" + + "\atx_hash \x01*\x04hashR\x04hash\x12)\n" + + "\n" + + 
"block_hash\x18\x02 \x01(\tB\n" + + "\xb2\xf8%\x06*\x04hashR\tblockHash\x12,\n" + + "\x05value\x18\x03 \x01(\tB\x16\xb2\xf8%\x12*\auint2562\adecimalR\x05value\x123\n" + + "\tgas_price\x18\x04 \x01(\tB\x16\xb2\xf8%\x12*\auint2562\adecimalR\bgasPrice\x120\n" + + "\ffrom_address\x18\x05 \x01(\tB\r\xb2\xf8%\t*\aaddressR\vfromAddress\x12,\n" + + "\n" + + "to_address\x18\x06 \x01(\tB\r\xb2\xf8%\t*\aaddressR\ttoAddress\x12(\n" + + "\n" + + "input_data\x18\a \x01(\tB\t\xb2\xf8%\x05*\x03hexR\tinputData\x12=\n" + + "\x0fblock_timestamp\x18\b \x01(\x03B\x14\xb2\xf8%\x10*\x0eunix_timestampR\x0eblockTimestamp\x12\x19\n" + + "\bgas_used\x18\t \x01(\x03R\agasUsed\x12+\n" + + "\x11transaction_index\x18\n" + + " \x01(\x05R\x10transactionIndex:\x16\xaa\xf8%\x12\n" + + "\x10eth_transactions\"\xe6\x02\n" + + "\rERC20Transfer\x128\n" + + "\x10contract_address\x18\x01 \x01(\tB\r\xb2\xf8%\t*\aaddressR\x0fcontractAddress\x120\n" + + "\ffrom_address\x18\x02 \x01(\tB\r\xb2\xf8%\t*\aaddressR\vfromAddress\x12,\n" + + "\n" + + "to_address\x18\x03 \x01(\tB\r\xb2\xf8%\t*\aaddressR\ttoAddress\x120\n" + + "\x06amount\x18\x04 \x01(\tB\x18\xb2\xf8%\x14*\tdecimal182\adecimalR\x06amount\x128\n" + + "\vusdc_amount\x18\x05 \x01(\tB\x17\xb2\xf8%\x13*\bdecimal62\adecimalR\n" + + "usdcAmount\x12\x1b\n" + + "\tlog_index\x18\x06 \x01(\x05R\blogIndex:2\xaa\xf8%.\n" + + "\x0ferc20_transfers\x12\x1beth_transactions on tx_hash\"\x8f\x04\n" + + "\fDeFiPosition\x12-\n" + + "\vposition_id\x18\x01 \x01(\tB\f\xb2\xf8%\b \x01*\x04uuidR\n" + + "positionId\x120\n" + + "\fuser_address\x18\x02 \x01(\tB\r\xb2\xf8%\t*\aaddressR\vuserAddress\x128\n" + + "\x10protocol_address\x18\x03 \x01(\tB\r\xb2\xf8%\t*\aaddressR\x0fprotocolAddress\x12<\n" + + "\x11collateral_amount\x18\x04 \x01(\tB\x0f\xb2\xf8%\v*\tdecimal18R\x10collateralAmount\x120\n" + + "\vdebt_amount\x18\x05 \x01(\tB\x0f\xb2\xf8%\v*\tdecimal18R\n" + + "debtAmount\x12D\n" + + "\x15liquidation_threshold\x18\x06 
\x01(\tB\x0f\xb2\xf8%\v*\tdecimal18R\x14liquidationThreshold\x12&\n" + + "\bmetadata\x18\a \x01(\tB\n" + + "\xb2\xf8%\x06*\x04jsonR\bmetadata\x123\n" + + "\n" + + "created_at\x18\b \x01(\x03B\x14\xb2\xf8%\x10*\x0eunix_timestampR\tcreatedAt\x12;\n" + + "\rupdated_at_ms\x18\t \x01(\x03B\x17\xb2\xf8%\x13*\x11unix_timestamp_msR\vupdatedAtMs:\x14\xaa\xf8%\x10\n" + + "\x0edefi_positions\"\x99\x03\n" + + "\vMetricsData\x12)\n" + + "\tmetric_id\x18\x01 \x01(\tB\f\xb2\xf8%\b \x01*\x04uuidR\bmetricId\x12$\n" + + "\atvl_usd\x18\x02 \x01(\tB\v\xb2\xf8%\a*\x05moneyR\x06tvlUsd\x12-\n" + + "\n" + + "btc_amount\x18\x03 \x01(\tB\x0e\xb2\xf8%\n" + + "*\bdecimal8R\tbtcAmount\x128\n" + + "\flarge_metric\x18\x04 \x01(\tB\x15\xb2\xf8%\x11*\x06int2562\adecimalR\vlargeMetric\x12-\n" + + "\vbinary_data\x18\x05 \x01(\tB\f\xb2\xf8%\b*\x06base64R\n" + + "binaryData\x12-\n" + + "\tsignature\x18\x06 \x01(\tB\x0f\xb2\xf8%\v*\tsignatureR\tsignature\x12+\n" + + "\n" + + "public_key\x18\a \x01(\tB\f\xb2\xf8%\b*\x06pubkeyR\tpublicKey\x126\n" + + "\vmeasured_at\x18\b \x01(\x03B\x15\xb2\xf8%\x11*\x0fblock_timestampR\n" + + "measuredAt:\r\xaa\xf8%\t\n" + + "\ametricsB\xa5\x01\n" + + "\x17com.test.semantic_typesB\fExampleProtoP\x01Z\x13test/semantic_types\xa2\x02\x03TSX\xaa\x02\x12Test.SemanticTypes\xca\x02\x12Test\\SemanticTypes\xe2\x02\x1eTest\\SemanticTypes\\GPBMetadata\xea\x02\x13Test::SemanticTypesb\x06proto3" + +var ( + file_test_semantic_types_example_proto_rawDescOnce sync.Once + file_test_semantic_types_example_proto_rawDescData []byte +) + +func file_test_semantic_types_example_proto_rawDescGZIP() []byte { + file_test_semantic_types_example_proto_rawDescOnce.Do(func() { + file_test_semantic_types_example_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_test_semantic_types_example_proto_rawDesc), len(file_test_semantic_types_example_proto_rawDesc))) + }) + return file_test_semantic_types_example_proto_rawDescData +} + +var 
file_test_semantic_types_example_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_test_semantic_types_example_proto_goTypes = []any{ + (*Output)(nil), // 0: test.semantic_types.Output + (*Entity)(nil), // 1: test.semantic_types.Entity + (*EthereumTransaction)(nil), // 2: test.semantic_types.EthereumTransaction + (*ERC20Transfer)(nil), // 3: test.semantic_types.ERC20Transfer + (*DeFiPosition)(nil), // 4: test.semantic_types.DeFiPosition + (*MetricsData)(nil), // 5: test.semantic_types.MetricsData +} +var file_test_semantic_types_example_proto_depIdxs = []int32{ + 1, // 0: test.semantic_types.Output.entities:type_name -> test.semantic_types.Entity + 2, // 1: test.semantic_types.Entity.eth_transaction:type_name -> test.semantic_types.EthereumTransaction + 3, // 2: test.semantic_types.Entity.erc20_transfer:type_name -> test.semantic_types.ERC20Transfer + 4, // 3: test.semantic_types.Entity.defi_position:type_name -> test.semantic_types.DeFiPosition + 5, // 4: test.semantic_types.Entity.metrics_data:type_name -> test.semantic_types.MetricsData + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_test_semantic_types_example_proto_init() } +func file_test_semantic_types_example_proto_init() { + if File_test_semantic_types_example_proto != nil { + return + } + file_test_semantic_types_example_proto_msgTypes[1].OneofWrappers = []any{ + (*Entity_EthTransaction)(nil), + (*Entity_Erc20Transfer)(nil), + (*Entity_DefiPosition)(nil), + (*Entity_MetricsData)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_test_semantic_types_example_proto_rawDesc), 
len(file_test_semantic_types_example_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_test_semantic_types_example_proto_goTypes, + DependencyIndexes: file_test_semantic_types_example_proto_depIdxs, + MessageInfos: file_test_semantic_types_example_proto_msgTypes, + }.Build() + File_test_semantic_types_example_proto = out.File + file_test_semantic_types_example_proto_goTypes = nil + file_test_semantic_types_example_proto_depIdxs = nil +} diff --git a/proto/sf/substreams/sink/sql/schema/v1/schema.proto b/proto/sf/substreams/sink/sql/schema/v1/schema.proto index 35d2323..e26c261 100644 --- a/proto/sf/substreams/sink/sql/schema/v1/schema.proto +++ b/proto/sf/substreams/sink/sql/schema/v1/schema.proto @@ -1,8 +1,8 @@ syntax = "proto3"; import "google/protobuf/descriptor.proto"; -package schema; -option go_package = "github.com/streamingfast/substreams-sink-sql/pb/schema"; +package sf.substreams.sink.sql.schema.v1; +option go_package = "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1"; extend google.protobuf.MessageOptions { Table table = 77701; @@ -10,15 +10,14 @@ extend google.protobuf.MessageOptions { extend google.protobuf.FieldOptions { Column field = 77702; - //todo: ignore } message Table { string name = 1; optional string child_of = 2; +// repeated string primary_key_fields = 3; - // should be remove - string many_to_one_relation_field_name = 81; + optional ClickhouseTableOptions clickhouse_table_options = 200; } message Column { @@ -26,4 +25,55 @@ message Column { optional string foreign_key = 2; bool unique = 3; bool primary_key = 4; -} \ No newline at end of file + // Semantic type annotation for custom SQL type mapping + optional string semantic_type = 5; // Semantic type hint (e.g., "uint256", "address") + optional string format_hint = 6; // Format hint for conversion (e.g., "hex", "decimal") +} + +message ClickhouseTableOptions { + repeated ClickhouseOrderByField 
order_by_fields = 1; + repeated ClickhousePartitionByField partition_fields = 2; + repeated ClickhouseReplacingField replacing_fields = 3; + repeated ClickhouseIndexField index_fields = 4; +} + +message ClickhousePartitionByField { + string name = 1; + Function function = 2; +} + +message ClickhouseOrderByField { + string name = 1; + bool descending = 2; + Function function = 3; +} + +message ClickhouseReplacingField { + string name = 1; +} + +message ClickhouseIndexField { + string name = 1; + string field_name = 2; + IndexType type = 3; + uint32 granularity = 4; + Function function = 5; +} + +enum IndexType { + minmax = 0; + set = 1; + ngrambf_v1 = 2; + tokenbf_v1 = 3; + bloom_filter = 4; +} + +enum Function { + unset = 0; + toYYYYMM = 1; + toYYYYDD = 2; + toYear = 3; + toMonth = 4; + toDate = 5; + toStartOfMonth = 6; +} diff --git a/proto/test/relations/relations.proto b/proto/test/relations/relations.proto index 4ae521b..fa86571 100644 --- a/proto/test/relations/relations.proto +++ b/proto/test/relations/relations.proto @@ -25,9 +25,9 @@ message Entity { } message TypesTest { - option (schema.table) = { name: "types_tests" }; + option (sf.substreams.sink.sql.schema.v1.table) = { name: "types_tests" }; - uint64 id =1 [(schema.field) = { primary_key: true }]; + uint64 id =1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; // Field for each protobuf native type double double_field = 2; float float_field = 3; @@ -50,35 +50,35 @@ message TypesTest { } message Customer { - option (schema.table) = { name: "customers" }; + option (sf.substreams.sink.sql.schema.v1.table) = { name: "customers" }; - string customer_id = 1 [(schema.field) = { primary_key: true }]; + string customer_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true }]; string name = 2; } message Order { - option (schema.table) = { name: "orders"}; + option (sf.substreams.sink.sql.schema.v1.table) = { name: "orders"}; - string order_id = 1 [(schema.field) = { 
primary_key: true}]; - string customer_ref_id = 2 [(schema.field) = { foreign_key: "customers on customer_id"}]; + string order_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { primary_key: true}]; + string customer_ref_id = 2 [(sf.substreams.sink.sql.schema.v1.field) = { foreign_key: "customers on customer_id"}]; repeated OrderItem items = 3; } message OrderItem { - option (schema.table) = { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "order_items", child_of: "orders on order_id" }; // can also leverage orders._id using "order on order_id" if order do not have a external unique identifier - string item_id = 2 [(schema.field) = { foreign_key: "items on item_id"}]; + string item_id = 2 [(sf.substreams.sink.sql.schema.v1.field) = { foreign_key: "items on item_id"}]; int64 quantity = 11; } message Item { - option (schema.table) = { name: "items" }; + option (sf.substreams.sink.sql.schema.v1.table) = { name: "items" }; - string item_id = 1 [(schema.field) = { unique: true }]; + string item_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { unique: true }]; string name = 10; double price = 11; diff --git a/proto/test/semantic_types/example.proto b/proto/test/semantic_types/example.proto new file mode 100644 index 0000000..27e7ec9 --- /dev/null +++ b/proto/test/semantic_types/example.proto @@ -0,0 +1,211 @@ +syntax = "proto3"; +import "google/protobuf/timestamp.proto"; +import "sf/substreams/sink/sql/schema/v1/schema.proto"; + +package test.semantic_types; +option go_package = "test/semantic_types"; + +// Example demonstrating semantic type annotations for RisingWave rw_int256 support +message Output { + repeated Entity entities = 1; +} + +message Entity { + oneof entity { + EthereumTransaction eth_transaction = 1; + ERC20Transfer erc20_transfer = 2; + DeFiPosition defi_position = 3; + MetricsData metrics_data = 4; + } +} + +// Ethereum transaction with semantic types optimized for RisingWave +message EthereumTransaction { + option 
(sf.substreams.sink.sql.schema.v1.table) = { name: "eth_transactions" }; + + // Transaction hash - optimized storage for 32-byte hashes + string hash = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + name: "tx_hash", + primary_key: true, + semantic_type: "hash" + }]; + + // Block hash + string block_hash = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "hash" + }]; + + // Transaction value in wei - uses RisingWave's rw_int256 for large numbers + string value = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "decimal" + }]; + + // Gas price in wei - also uses rw_int256 + string gas_price = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "uint256", + format_hint: "decimal" + }]; + + // From address - optimized VARCHAR(42) storage + string from_address = 5 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // To address + string to_address = 6 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // Input data as hex + string input_data = 7 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "hex" + }]; + + // Block timestamp + int64 block_timestamp = 8 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "unix_timestamp" + }]; + + // Gas used (regular integer, not 256-bit) + int64 gas_used = 9; + + // Transaction index in block + int32 transaction_index = 10; +} + +// ERC20 transfer events with precise decimal handling +message ERC20Transfer { + option (sf.substreams.sink.sql.schema.v1.table) = { + name: "erc20_transfers", + child_of: "eth_transactions on tx_hash" + }; + + // Contract address + string contract_address = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // From address + string from_address = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // To address + string to_address = 3 [(sf.substreams.sink.sql.schema.v1.field) = { 
+ semantic_type: "address" + }]; + + // Transfer amount - 18 decimal places for most ERC20 tokens + string amount = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "decimal18", + format_hint: "decimal" + }]; + + // USDC amount example - 6 decimal places + string usdc_amount = 5 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "decimal6", + format_hint: "decimal" + }]; + + // Log index in transaction + int32 log_index = 6; +} + +// DeFi position with various numeric types +message DeFiPosition { + option (sf.substreams.sink.sql.schema.v1.table) = { name: "defi_positions" }; + + // Position ID as UUID + string position_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true, + semantic_type: "uuid" + }]; + + // User address + string user_address = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // Protocol address + string protocol_address = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "address" + }]; + + // Collateral amount (18 decimals) + string collateral_amount = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "decimal18" + }]; + + // Debt amount (18 decimals) + string debt_amount = 5 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "decimal18" + }]; + + // Liquidation threshold (percentage with 18 decimals) + string liquidation_threshold = 6 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "decimal18" + }]; + + // Position metadata as JSON + string metadata = 7 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "json" + }]; + + // Created timestamp + int64 created_at = 8 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "unix_timestamp" + }]; + + // Updated timestamp (milliseconds) + int64 updated_at_ms = 9 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "unix_timestamp_ms" + }]; +} + +// Metrics and monitoring data +message MetricsData { + option 
(sf.substreams.sink.sql.schema.v1.table) = { name: "metrics" }; + + // Metric ID + string metric_id = 1 [(sf.substreams.sink.sql.schema.v1.field) = { + primary_key: true, + semantic_type: "uuid" + }]; + + // Total value locked in protocol (money type for USD values) + string tvl_usd = 2 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "money" + }]; + + // Bitcoin amount (8 decimal places) + string btc_amount = 3 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "decimal8" + }]; + + // Large integer metric (uses rw_int256) + string large_metric = 4 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "int256", + format_hint: "decimal" + }]; + + // Binary data + string binary_data = 5 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "base64" + }]; + + // Signature data + string signature = 6 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "signature" + }]; + + // Public key + string public_key = 7 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "pubkey" + }]; + + // Measurement timestamp + int64 measured_at = 8 [(sf.substreams.sink.sql.schema.v1.field) = { + semantic_type: "block_timestamp" + }]; +} \ No newline at end of file diff --git a/proto/utils.go b/proto/utils.go index a11e9d3..3a7ff97 100644 --- a/proto/utils.go +++ b/proto/utils.go @@ -6,7 +6,7 @@ import ( proto "github.com/golang/protobuf/proto" "github.com/jhump/protoreflect/desc" - "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1" + schema "github.com/streamingfast/substreams-sink-sql/pb/sf/substreams/sink/sql/schema/v1" "google.golang.org/protobuf/types/descriptorpb" ) @@ -50,3 +50,22 @@ func FieldInfo(d *desc.FieldDescriptor) *schema.Column { } } } + +// SemanticTypeInfo extracts semantic type information from field annotations +func SemanticTypeInfo(d *desc.FieldDescriptor) (semanticType string, formatHint string, hasSemanticType bool) { + fieldInfo := FieldInfo(d) + if fieldInfo == nil { 
+ return "", "", false + } + + if fieldInfo.SemanticType != nil { + semanticType = fieldInfo.GetSemanticType() + hasSemanticType = true + } + + if fieldInfo.FormatHint != nil { + formatHint = fieldInfo.GetFormatHint() + } + + return semanticType, formatHint, hasSemanticType +}