# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
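
# set-arg-default supplies a fallback for ${arg.single-replica-cluster} that
# is used whenever no value for the argument is passed in from outside.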
$ set-arg-default single-replica-cluster=quickstart

# Tests for testdrive itself.

# Uncomment to test that timeouts happen in the time desired.
#
# Note that the duration string format can be anything parsable
# by the parse_duration crate.
#
# $ set-sql-timeout duration=2minutes
# $ set-sql-timeout duration=default
# > select * from nonexistent
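# (duration=default presumably restores testdrive's default timeout rather
# than setting a literal duration.)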

# Test that hashing rows works and is consistent.
> CREATE VIEW v AS VALUES (1, 'foo'), (2, 'bar'), (3, 'foo'), (1, 'bar')

> SELECT * FROM v
4 values hashing to 7dd470c8470b085df13552e191a244ab
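# Rather than listing every expected row, a result set can be asserted as
# "<count> values hashing to <md5>", which keeps large outputs manageable.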

> VALUES ('row', 1), ('row', 2)
row 1
# inline comment
row 2
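# As demonstrated above, comment lines interleaved with expected results are
# ignored when the output is compared.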

# Test DATE, TIME, TIMESTAMP output
> CREATE TABLE t1 (f1 DATE, f2 TIME, f3 TIMESTAMP)

> INSERT INTO t1 VALUES ('2011-11-11', '11:11:11', '2011-11-11 11:11:11')

> SELECT * FROM t1
"2011-11-11" "11:11:11" "2011-11-11 11:11:11"

# Test set-regex
$ set-regex match=u\d+ replacement=UID
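# From here on, any substring matching u\d+ in results and error messages is
# rewritten to UID before comparison.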

? EXPLAIN OPTIMIZED PLAN AS VERBOSE TEXT FOR SELECT * FROM t1 AS a1, t1 AS a2 WHERE a1.f1 IS NOT NULL;
Explained Query:
  CrossJoin type=differential
    ArrangeBy keys=[[]]
      Filter (#0{f1}) IS NOT NULL
        ReadStorage materialize.public.t1
    ArrangeBy keys=[[]]
      ReadStorage materialize.public.t1

Source materialize.public.t1

Target cluster: quickstart

! SELECT * FROM u1234;
contains:unknown catalog item 'UID'
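# The ! action asserts that the statement fails, and contains: matches a
# substring of the error (u1234 in the message is rewritten to UID by the
# regex above); exact: and regex: match modes are also available.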

# Exclude FETCH from the retry logic
> CREATE MATERIALIZED VIEW v1 AS VALUES (1),(2),(3);

> BEGIN

> DECLARE c CURSOR FOR SUBSCRIBE v1 AS OF 18446744073709551615;

> FETCH 4 c WITH (timeout='10s');
18446744073709551615 1 1
18446744073709551615 1 2
18446744073709551615 1 3

> COMMIT
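# 18446744073709551615 is u64::MAX, pinning the SUBSCRIBE to the maximal
# timestamp; FETCH asks for four rows, gets the three that exist, and gives
# up after the 10s timeout instead of being retried.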

# kafka-verify sort-messages
> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE CONNECTION IF NOT EXISTS csr_conn TO CONFLUENT SCHEMA REGISTRY (
    URL '${testdrive.schema-registry-url}'
  );

> CREATE MATERIALIZED VIEW sort_messages (a) AS VALUES (2),(1),(3);

> CREATE SINK sort_messages_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM sort_messages
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'sort-messages-sink-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=avro sink=materialize.public.sort_messages_sink sort-messages=true
{"before": null, "after": {"row": {"a": 1}}}
{"before": null, "after": {"row": {"a": 2}}}
{"before": null, "after": {"row": {"a": 3}}}

# Use $ postgres-execute and ${testdrive.materialize-sql-addr}
$ postgres-execute connection=postgres://materialize:materialize@${testdrive.materialize-sql-addr}
CREATE TABLE postgres_execute (f1 INTEGER);
INSERT INTO postgres_execute VALUES (123);

> SELECT * FROM postgres_execute;
123
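# postgres-execute runs each line as a separate statement over a plain pgwire
# connection; the statements' results are not checked.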

# http-request
$ http-request method=GET url=${testdrive.schema-registry-url}schemas/types
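# The action presumably succeeds as long as the registry responds with a
# success status; the response body is not asserted here.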

# kafka-ingest repeat
$ set kafka-ingest-repeat={
    "type" : "record",
    "name" : "test",
    "fields" : [
        {"name":"f1", "type":"string"}
    ]
  }

$ kafka-create-topic topic=kafka-ingest-repeat

$ kafka-ingest format=avro topic=kafka-ingest-repeat schema=${kafka-ingest-repeat} repeat=2
{"f1": "fish"}

> CREATE SOURCE kafka_ingest_repeat_input
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-kafka-ingest-repeat-${testdrive.seed}')

> CREATE TABLE kafka_ingest_repeat_input_tbl FROM SOURCE kafka_ingest_repeat_input (REFERENCE "testdrive-kafka-ingest-repeat-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE NONE

> SELECT * FROM kafka_ingest_repeat_input_tbl;
fish
fish
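# repeat=2 ingests the single record above twice, hence two rows of "fish".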

# kafka-ingest repeat with ${kafka-ingest.iteration}
$ kafka-ingest format=avro topic=kafka-ingest-repeat schema=${kafka-ingest-repeat} repeat=2
{"f1": "${kafka-ingest.iteration}"}

> SELECT * FROM kafka_ingest_repeat_input_tbl;
0
1
fish
fish
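# ${kafka-ingest.iteration} expands to the zero-based iteration counter, so
# the two new records carry the values "0" and "1".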

# kafka-ingest with no explicit 'partition' argument should spread the
# records evenly across the partitions
$ set kafka-ingest-no-partition-key={"type": "string"}

$ set kafka-ingest-no-partition-value={"type": "record", "name": "r", "fields": [{"name": "a", "type": "string"}]}

$ kafka-create-topic topic=kafka-ingest-no-partition partitions=2

$ kafka-ingest format=avro topic=kafka-ingest-no-partition key-format=avro key-schema=${kafka-ingest-no-partition-key} schema=${kafka-ingest-no-partition-value}
"a" {"a": "a"}
"b" {"a": "b"}
"c" {"a": "c"}
"d" {"a": "d"}
"e" {"a": "e"}
"f" {"a": "f"}
"g" {"a": "g"}
"h" {"a": "h"}

> CREATE SOURCE kafka_ingest_no_partition
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-kafka-ingest-no-partition-${testdrive.seed}')

> CREATE TABLE kafka_ingest_no_partition_tbl FROM SOURCE kafka_ingest_no_partition (REFERENCE "testdrive-kafka-ingest-no-partition-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE NONE

> SELECT COUNT(*) FROM kafka_ingest_no_partition_tbl;
8
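# Only the total row count is asserted; the per-partition distribution is not
# directly observable through SQL here.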

# kafka-verify with regex (the set-regex from above is used)
> CREATE MATERIALIZED VIEW kafka_verify_regexp (a) AS VALUES ('u123'), ('u234');

> CREATE SINK kafka_verify_regexp_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM kafka_verify_regexp
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-kafka-verify-regexp-sink-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=avro sink=materialize.public.kafka_verify_regexp_sink sort-messages=true
{"before": null, "after": {"row": {"a": "UID"}}}
{"before": null, "after": {"row": {"a": "UID"}}}

# $ postgres-connect
> CREATE TABLE postgres_connect (f1 INTEGER);

$ postgres-connect name=conn1 url=postgres://materialize:materialize@${testdrive.materialize-sql-addr}

$ postgres-execute connection=conn1
BEGIN;
INSERT INTO postgres_connect VALUES (1);

# The table is still empty; the transaction we just started has not been
# committed yet.
> SELECT COUNT(*) FROM postgres_connect;
0

$ postgres-execute connection=conn1
INSERT INTO postgres_connect VALUES (2);
COMMIT;

> SELECT COUNT(*) FROM postgres_connect;
2
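# A connection named via postgres-connect keeps its session state, so the
# open transaction spans the two postgres-execute blocks above.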

# Comments don't affect following lines
> CREATE TABLE t (x int)

> INSERT INTO t VALUES (0)

> SELECT *
  FROM t -- this is a comment
  WHERE x = 1
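# No output rows are expected: only 0 was inserted, so x = 1 matches nothing,
# and the trailing SQL comment does not swallow the WHERE clause.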