- # Copyright Materialize, Inc. and contributors. All rights reserved.
- #
- # Use of this software is governed by the Business Source License
- # included in the LICENSE file at the root of this repository.
- #
- # As of the Change Date specified in that file, in accordance with
- # the Business Source License, use of this software will be governed
- # by the Apache License, Version 2.0.
- #
- # Test that source storage statistics (offset_known / offset_committed in
- # mz_internal.mz_source_statistics_raw) are reported for a Kafka UPSERT
- # source and a load-generator source, and that offset_known advances
- # once additional data is ingested.
- $ set-arg-default single-replica-cluster=quickstart
- # Set short statistics collection/aggregation intervals so the stats
- # assertions below settle quickly.
- $ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
- ALTER SYSTEM SET storage_statistics_collection_interval = 1000
- ALTER SYSTEM SET storage_statistics_interval = 2000
- # Avro key schema for the upsert topic: a single string key.
- $ set keyschema={
- "type": "record",
- "name": "Key",
- "fields": [
- {"name": "key", "type": "string"}
- ]
- }
- # Avro value schema: two fields, f1 (string) and f2 (long).
- $ set schema={
- "type" : "record",
- "name" : "test",
- "fields" : [
- {"name":"f1", "type":"string"},
- {"name":"f2", "type":"long"}
- ]
- }
- $ kafka-create-topic topic=upsert partitions=2
- # Seed the upsert topic. A key with no value (e.g. the bare {"key": "bird1"}
- # and {"key": "mammal1"} lines) acts as a delete for that key under
- # ENVELOPE UPSERT — those keys are absent from the SELECT results below.
- $ kafka-ingest format=avro topic=upsert key-format=avro key-schema=${keyschema} schema=${schema}
- {"key": "fish"} {"f1": "fish", "f2": 1000}
- {"key": "bird1"} {"f1":"goose", "f2": 1}
- {"key": "birdmore"} {"f1":"geese", "f2": 2}
- {"key": "mammal1"} {"f1": "moose", "f2": 1}
- {"key": "bird1"}
- {"key": "birdmore"} {"f1":"geese", "f2": 56}
- {"key": "mammalmore"} {"f1": "moose", "f2": 42}
- {"key": "mammal1"}
- {"key": "mammalmore"} {"f1":"moose", "f2": 2}
- # NOTE(review): no source is ever created over this metrics-test topic in
- # this file — presumably it exists so unrelated topic data does not leak
- # into the statistics being asserted; confirm the original intent.
- $ kafka-create-topic topic=metrics-test partitions=1
- $ kafka-ingest topic=metrics-test format=bytes
- jack,jill
- goofus,gallant
- # Connections to the test Kafka broker and Confluent Schema Registry.
- > CREATE CONNECTION kafka_conn
- TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);
- > CREATE CONNECTION IF NOT EXISTS csr_conn TO CONFLUENT SCHEMA REGISTRY (
- URL '${testdrive.schema-registry-url}'
- );
- # Kafka source over the seeded topic with ENVELOPE UPSERT; INCLUDE OFFSET
- # exposes each record's Kafka offset as a column.
- > CREATE SOURCE upsert
-   IN CLUSTER ${arg.single-replica-cluster}
-   FROM KAFKA CONNECTION kafka_conn (TOPIC
-   'testdrive-upsert-${testdrive.seed}'
-   )
-   FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
-   INCLUDE OFFSET
-   ENVELOPE UPSERT
- # A finite load generator (stops at 100) as a second, non-Kafka source.
- > CREATE SOURCE counter
-   IN CLUSTER ${arg.single-replica-cluster}
-   FROM LOAD GENERATOR COUNTER (UP TO 100);
- # Adding a select here so that the ingests after this
- # triggers lookup from the upsert state
- > SELECT key, f1, f2 FROM upsert
- key f1 f2
- ------------------------
- fish fish 1000
- birdmore geese 56
- mammalmore moose 2
- # The upsert source must report progress (offset_known > 0) with all
- # known offsets committed (offset_known = offset_committed).
- > SELECT
-     s.name,
-     SUM(u.offset_known) > 0,
-     SUM(u.offset_known) = SUM(u.offset_committed)
-   FROM mz_sources s
-   JOIN mz_internal.mz_source_statistics_raw u ON s.id = u.id
-   WHERE s.name IN ('upsert')
-   GROUP BY s.name
-   ORDER BY s.name
- upsert true true
- # The counter source must also report nonzero known and committed offsets.
- > SELECT
-     s.name,
-     SUM(u.offset_known) > 0,
-     SUM(u.offset_committed) > 0
-   FROM mz_sources s
-   JOIN mz_internal.mz_source_statistics_raw u ON s.id = u.id
-   WHERE s.name IN ('counter')
-   GROUP BY s.name
-   ORDER BY s.name
- counter true true
- # Snapshot the upsert source's current total offset_known so we can assert
- # below that it advances after more data is ingested.
- $ set-from-sql var=previous-offset-known
- SELECT
-   (SUM(u.offset_known))::text
- FROM mz_sources s
- JOIN mz_internal.mz_source_statistics_raw u ON s.id = u.id
- WHERE s.name IN ('upsert')
- # Delete 'mammalmore' by ingesting a key with no value (upsert tombstone).
- $ kafka-ingest format=avro topic=upsert key-format=avro key-schema=${keyschema} schema=${schema}
- {"key": "mammalmore"}
- # 'mammalmore' is now gone from the upsert output.
- > SELECT key, f1, f2 FROM upsert
- key f1 f2
- ------------------------
- fish fish 1000
- birdmore geese 56
- # offset_known must have advanced past the saved snapshot, and
- # offset_committed must again have caught up to offset_known.
- > SELECT
-     s.name,
-     SUM(u.offset_known) > ${previous-offset-known},
-     SUM(u.offset_known) = SUM(u.offset_committed)
-   FROM mz_sources s
-   JOIN mz_internal.mz_source_statistics_raw u ON s.id = u.id
-   WHERE s.name IN ('upsert')
-   GROUP BY s.name
-   ORDER BY s.name
- upsert true true
- # Cleanup.
- > DROP SOURCE upsert CASCADE
- > DROP SOURCE counter CASCADE