# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default single-replica-cluster=quickstart

# Tests for testdrive itself.

# Uncomment to test that timeouts happen in the time desired.
#
# Note that the duration string format can be anything parsable
# by the parse_duration crate.
#
# $ set-sql-timeout duration=2minutes
# $ set-sql-timeout duration=default
# > select * from nonexistent

# Test that hashing rows works and is consistent.

> CREATE VIEW v AS VALUES (1, 'foo'), (2, 'bar'), (3, 'foo'), (1, 'bar')

> SELECT * FROM v
4 values hashing to 7dd470c8470b085df13552e191a244ab
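
# The "<N> values hashing to <digest>" form asserts a digest of the sorted
# result set instead of listing every row, which keeps expectations compact
# for large outputs.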

> VALUES ('row', 1), ('row', 2)
row 1
# inline comment
row 2

# Test DATE, TIME, TIMESTAMP output

> CREATE TABLE t1 (f1 DATE, f2 TIME, f3 TIMESTAMP)

> INSERT INTO t1 VALUES ('2011-11-11', '11:11:11', '2011-11-11 11:11:11')

> SELECT * FROM t1
"2011-11-11" "11:11:11" "2011-11-11 11:11:11"

# Test set-regex

$ set-regex match=u\d+ replacement=UID
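
# From this point on, every u<digits> token in actual output is rewritten to
# UID before being compared against the expected output.

# ?[...] gates a command on the version of Materialize under test; the three
# EXPLAIN variants below account for changes in plan output across versions.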
?[version>=14400] EXPLAIN OPTIMIZED PLAN AS VERBOSE TEXT FOR SELECT * FROM t1 AS a1, t1 AS a2 WHERE a1.f1 IS NOT NULL;
Explained Query:
  CrossJoin type=differential
    ArrangeBy keys=[[]]
      Filter (#0{f1}) IS NOT NULL
        ReadStorage materialize.public.t1
    ArrangeBy keys=[[]]
      ReadStorage materialize.public.t1

Source materialize.public.t1

Target cluster: quickstart

?[13500<=version<14400] EXPLAIN OPTIMIZED PLAN AS VERBOSE TEXT FOR SELECT * FROM t1 AS a1, t1 AS a2 WHERE a1.f1 IS NOT NULL;
Explained Query:
  CrossJoin type=differential
    ArrangeBy keys=[[]]
      Filter (#0) IS NOT NULL
        ReadStorage materialize.public.t1
    ArrangeBy keys=[[]]
      ReadStorage materialize.public.t1

Source materialize.public.t1

Target cluster: quickstart

?[version<13500] EXPLAIN OPTIMIZED PLAN FOR SELECT * FROM t1 AS a1, t1 AS a2 WHERE a1.f1 IS NOT NULL;
Explained Query:
  CrossJoin type=differential
    ArrangeBy keys=[[]]
      Filter (#0) IS NOT NULL
        ReadStorage materialize.public.t1
    ArrangeBy keys=[[]]
      ReadStorage materialize.public.t1

Source materialize.public.t1

Target cluster: quickstart

! SELECT * FROM u1234;
contains:unknown catalog item 'UID'
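
# `!` expects the statement to fail and `contains:` does a substring match on
# the error; the set-regex above has already rewritten u1234 to UID in the
# actual error text by the time it is compared.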

# Exclude FETCH from the retry logic

> CREATE MATERIALIZED VIEW v1 AS VALUES (1),(2),(3);

> BEGIN

> DECLARE c CURSOR FOR SUBSCRIBE v1 AS OF 18446744073709551615;

> FETCH 4 c WITH (timeout='10s');
18446744073709551615 1 1
18446744073709551615 1 2
18446744073709551615 1 3

> COMMIT
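
# 18446744073709551615 is u64::MAX, the largest possible timestamp, so each
# row is emitted exactly once at that time. Only three rows exist, so FETCH 4
# returns once its 10s timeout elapses instead of retrying.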

# kafka-verify sort-messages

> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE CONNECTION IF NOT EXISTS csr_conn TO CONFLUENT SCHEMA REGISTRY (
    URL '${testdrive.schema-registry-url}'
  );

> CREATE MATERIALIZED VIEW sort_messages (a) AS VALUES (2),(1),(3);

> CREATE SINK sort_messages_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM sort_messages
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'sort-messages-sink-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=avro sink=materialize.public.sort_messages_sink sort-messages=true
{"before": null, "after": {"row": {"a": 1}}}
{"before": null, "after": {"row": {"a": 2}}}
{"before": null, "after": {"row": {"a": 3}}}

# Use $ postgres-execute and ${testdrive.materialize-sql-addr}

$ postgres-execute connection=postgres://materialize:materialize@${testdrive.materialize-sql-addr}
CREATE TABLE postgres_execute (f1 INTEGER);
INSERT INTO postgres_execute VALUES (123);

> SELECT * FROM postgres_execute;
123
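
# postgres-execute runs its statements over a plain PostgreSQL connection,
# outside of testdrive's usual result comparison and retry machinery.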

# http-request

$ http-request method=GET url=${testdrive.schema-registry-url}schemas/types
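
# The action fails if the request returns an error status, so this doubles as
# a liveness check of the schema registry.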

# kafka-ingest repeat

$ set kafka-ingest-repeat={
    "type" : "record",
    "name" : "test",
    "fields" : [
      {"name":"f1", "type":"string"}
    ]
  }

$ kafka-create-topic topic=kafka-ingest-repeat

$ kafka-ingest format=avro topic=kafka-ingest-repeat schema=${kafka-ingest-repeat} repeat=2
{"f1": "fish"}

> CREATE SOURCE kafka_ingest_repeat_input
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-kafka-ingest-repeat-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE NONE

> SELECT * FROM kafka_ingest_repeat_input;
fish
fish
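
# repeat=2 ingested the single record above twice, hence the two copies.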

# kafka-ingest repeat with ${kafka-ingest.iteration}

$ kafka-ingest format=avro topic=kafka-ingest-repeat schema=${kafka-ingest-repeat} repeat=2
{"f1": "${kafka-ingest.iteration}"}

> SELECT * FROM kafka_ingest_repeat_input;
0
1
fish
fish
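
# ${kafka-ingest.iteration} expands to the zero-based repetition counter, so
# the two new records carry "0" and "1"; the two "fish" rows are left over
# from the previous ingestion into the same topic.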

# kafka-ingest with no explicit 'partition' argument should spread the
# records evenly across the partitions

$ set kafka-ingest-no-partition-key={"type": "string"}

$ set kafka-ingest-no-partition-value={"type": "record", "name": "r", "fields": [{"name": "a", "type": "string"}]}

$ kafka-create-topic topic=kafka-ingest-no-partition partitions=2

$ kafka-ingest format=avro topic=kafka-ingest-no-partition key-format=avro key-schema=${kafka-ingest-no-partition-key} schema=${kafka-ingest-no-partition-value}
"a" {"a": "a"}
"b" {"a": "b"}
"c" {"a": "c"}
"d" {"a": "d"}
"e" {"a": "e"}
"f" {"a": "f"}
"g" {"a": "g"}
"h" {"a": "h"}

> CREATE SOURCE kafka_ingest_no_partition
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-kafka-ingest-no-partition-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE NONE

> SELECT COUNT(*) FROM kafka_ingest_no_partition;
8
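
# The count confirms that all eight records were ingested even though they
# were distributed across both partitions.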

# kafka-verify with regex (the set-regex from above is used)

> CREATE MATERIALIZED VIEW kafka_verify_regexp (a) AS VALUES ('u123'), ('u234');

> CREATE SINK kafka_verify_regexp_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM kafka_verify_regexp
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-kafka-verify-regexp-sink-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE DEBEZIUM

$ kafka-verify-data format=avro sink=materialize.public.kafka_verify_regexp_sink sort-messages=true
{"before": null, "after": {"row": {"a": "UID"}}}
{"before": null, "after": {"row": {"a": "UID"}}}

# $ postgres-connect

> CREATE TABLE postgres_connect (f1 INTEGER);

$ postgres-connect name=conn1 url=postgres://materialize:materialize@${testdrive.materialize-sql-addr}

$ postgres-execute connection=conn1
BEGIN;
INSERT INTO postgres_connect VALUES (1);

# The table is still empty; the transaction we just started has not been
# committed yet.
> SELECT COUNT(*) FROM postgres_connect;
0

$ postgres-execute connection=conn1
INSERT INTO postgres_connect VALUES (2);
COMMIT;

> SELECT COUNT(*) FROM postgres_connect;
2
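
# postgres-connect opens a named connection that persists across
# postgres-execute actions, which is what allows the transaction above to
# stay open between the two blocks.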

# Comments don't affect following lines

> CREATE TABLE t (x int)

> INSERT INTO t VALUES (0)

> SELECT *
  FROM t -- this is a comment
  WHERE x = 1
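
# The trailing `--` SQL comment is part of the statement and does not end the
# multi-line command; the expected result set is empty because t contains
# only (0).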