# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default default-storage-size=1
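
# Set up a simple view, the Kafka and Schema Registry connections, and a
# dedicated cluster for the sinks below.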
> CREATE MATERIALIZED VIEW simple_view AS SELECT 1 AS a, 2 AS b, 3 AS c;

> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE CONNECTION IF NOT EXISTS csr_conn TO CONFLUENT SCHEMA REGISTRY (
    URL '${testdrive.schema-registry-url}'
  );

> CREATE CLUSTER simple_view_sink_cluster SIZE '${arg.default-storage-size}';
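
# JSON ARRAY is not yet a supported sink format, so this must fail.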
! CREATE SINK simple_view_sink
  IN CLUSTER simple_view_sink_cluster
  FROM simple_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'unnamed-cols-sink-${testdrive.seed}')
  FORMAT JSON ARRAY
  ENVELOPE DEBEZIUM
contains:JSON ARRAY format in sinks not yet supported
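
# Plain JSON with the Debezium envelope works; verify the emitted
# before/after record.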
> CREATE SINK simple_view_sink
  IN CLUSTER simple_view_sink_cluster
  FROM simple_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'unnamed-cols-sink-${testdrive.seed}')
  FORMAT JSON
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=json sink=materialize.public.simple_view_sink key=false
{"before": null, "after": {"a": 1, "b": 2, "c": 3}}
> CREATE CLUSTER simple_view_upsert_cluster SIZE '${arg.default-storage-size}';

> CREATE SINK simple_view_upsert
  IN CLUSTER simple_view_upsert_cluster
  FROM simple_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-unnamed-upsert-${testdrive.seed}')
  KEY (b)
  FORMAT JSON
  ENVELOPE UPSERT

$ kafka-verify-data format=json sink=materialize.public.simple_view_upsert key=true
{"b": 2} {"a": 1, "b": 2, "c": 3}
> CREATE MATERIALIZED VIEW complex_view AS SELECT LIST[1,3] AS a, 2 AS b, 3 AS c;

> CREATE CLUSTER mixed_types_cluster SIZE '${arg.default-storage-size}';

# This should error since binary encoding can't support complex types like lists.
! CREATE SINK mixed_types
  IN CLUSTER mixed_types_cluster
  FROM complex_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-unnamed-upsert-${testdrive.seed}')
  KEY (a)
  KEY FORMAT BYTES
  VALUE FORMAT JSON
  ENVELOPE UPSERT
contains:BYTES format with non-encodable type

# This should error since compound keys can't use text or binary encoding.
! CREATE SINK mixed_types
  IN CLUSTER mixed_types_cluster
  FROM simple_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-unnamed-upsert-${testdrive.seed}')
  KEY (a, b)
  KEY FORMAT TEXT
  VALUE FORMAT JSON
  ENVELOPE UPSERT
contains:BYTES or TEXT format with multiple columns not yet supported
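
# A single key column with a TEXT-encodable type is accepted.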
> CREATE SINK mixed_types
  IN CLUSTER mixed_types_cluster
  FROM simple_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-unnamed-upsert-${testdrive.seed}')
  KEY (b)
  KEY FORMAT TEXT
  VALUE FORMAT JSON
  ENVELOPE UPSERT

$ kafka-verify-data key-format=text value-format=json sink=materialize.public.mixed_types
2 {"a": 1, "b": 2, "c": 3}

# Standard types

> CREATE MATERIALIZED VIEW types_view AS
  SELECT TRUE::boolean c1,
         FALSE::boolean c2,
         NULL c3,
         123456789::bigint c4,
         1234.5678::double c5,
         1234.5678::decimal c6,
         '2011-11-11 11:11:11.12345'::timestamp c7,
         '2011-11-11 11:11:11.12345+12'::timestamptz c8,
         '2011-11-11'::date c9,
         '11:11:11.123456'::time c10,
         INTERVAL '1 year' c11,
         '324373a5-7718-46b1-a7ea-4a7c9981fc4e'::uuid c12,
         'текст'::bytea c13,
         '{"a": 2}'::jsonb c14

> CREATE CLUSTER types_sink_cluster SIZE '${arg.default-storage-size}';

> CREATE SINK types_sink
  IN CLUSTER types_sink_cluster
  FROM types_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-types-sink-${testdrive.seed}')
  FORMAT JSON
  ENVELOPE DEBEZIUM

# Due to limitations in $ kafka-verify-data, the entire expected JSON output
# needs to be provided on a single line.
$ kafka-verify-data format=json sink=materialize.public.types_sink key=false
{"before":null,"after":{"c1":true,"c2":false,"c3":null,"c4":123456789,"c5":1234.5678,"c6":"1234.5678","c7":"1321009871123.450","c8":"1320966671123.450","c9":"2011-11-11","c10":"11:11:11.123456","c11":"1 year","c12":"324373a5-7718-46b1-a7ea-4a7c9981fc4e","c13":[209,130,208,181,208,186,209,129,209,130],"c14":{"a":2}}}

# Special characters

> CREATE MATERIALIZED VIEW special_characters_view AS
  SELECT 'текст' c1, '"' c2, '''' c3, '\' c4, E'a\n\tb' c5

> CREATE CLUSTER special_characters_sink_cluster SIZE '${arg.default-storage-size}';

> CREATE SINK special_characters_sink
  IN CLUSTER special_characters_sink_cluster
  FROM special_characters_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-special-characters-sink-${testdrive.seed}')
  FORMAT JSON
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=json sink=materialize.public.special_characters_sink key=false
{"before":null,"after":{"c1":"текст","c2":"\"","c3":"'","c4":"\\","c5":"a\n\tb"}}

# Record

> CREATE MATERIALIZED VIEW record_view AS SELECT simple_view FROM simple_view;

> CREATE CLUSTER record_sink_cluster SIZE '${arg.default-storage-size}';

> CREATE SINK record_sink
  IN CLUSTER record_sink_cluster
  FROM record_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-record-sink-${testdrive.seed}')
  FORMAT JSON
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=json sink=materialize.public.record_sink key=false
{"before":null,"after":{"simple_view":{"a":1,"b":2,"c":3}}}

# Duplicate column names

! CREATE VIEW duplicate_cols AS SELECT 'a1' AS a, 'a1' AS a;
contains:column "a" specified more than once

! CREATE MATERIALIZED VIEW duplicate_cols AS SELECT 'a1' AS a, 'a1' AS a;
contains:column "a" specified more than once

# Complex types

> CREATE TYPE int4_list AS LIST (ELEMENT TYPE = int4);
> CREATE TYPE int4_list_list AS LIST (ELEMENT TYPE = int4_list);
> CREATE TYPE int4_map AS MAP (KEY TYPE = text, VALUE TYPE = int4);
> CREATE TYPE int4_map_map AS MAP (KEY TYPE = text, VALUE TYPE = int4_map);

# We do this dance here to work around database-issues#7544.
$ set-from-sql var=int4_id
SELECT id FROM mz_objects WHERE name = '_int4';

$ set-from-sql var=text_id
SELECT id FROM mz_objects WHERE name = '_text';

> CREATE MATERIALIZED VIEW complex_type_view AS
  SELECT
    '{{1,2},{3,4}}'::int4_list_list c1,
    '{a=>{b=>1, c=>2}, d=> {e=>3, f=>4}}'::int4_map_map c2,
    ARRAY[ARRAY[1, 2], ARRAY[3, 4], ARRAY[5, 6]]::[${int4_id} AS "pg_catalog"."_int4"] c3,
    ARRAY[]::[${int4_id} AS "pg_catalog"."_int4"] c4,
    ARRAY[ARRAY[ARRAY['a'], ARRAY['b']], ARRAY[ARRAY['c'], ARRAY['d']]]::[${text_id} AS "pg_catalog"."_text"] c5;

> CREATE CLUSTER complex_type_sink_cluster SIZE '${arg.default-storage-size}';

> CREATE SINK complex_type_sink
  IN CLUSTER complex_type_sink_cluster
  FROM complex_type_view
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-complex-type-sink-${testdrive.seed}')
  FORMAT JSON
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=json sink=materialize.public.complex_type_sink key=false
{"before": null, "after": {"c1": [[1,2],[3,4]], "c2": {"a":{"b":1, "c":2}, "d": {"e":3, "f":4}}, "c3": [[1, 2], [3, 4], [5,6]], "c4": [], "c5": [[["a"], ["b"]], [["c"], ["d"]]]}}

# testdrive will not automatically clean up types, so we need to do that ourselves.
> DROP MATERIALIZED VIEW complex_type_view CASCADE;
> DROP TYPE int4_list_list;
> DROP TYPE int4_list;
> DROP TYPE int4_map_map;
> DROP TYPE int4_map;