# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default default-storage-size=1
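
# This test creates one dedicated storage cluster per source (ten in total), so
# raise the cluster limit accordingly.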
$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET max_clusters = 20

# Regression test for https://github.com/MaterializeInc/database-issues/issues/3135
#
# This file uses the old create source syntax.

$ set-sql-timeout duration=300s

$ set keyschema={"type": "record", "name": "Key", "fields": [ { "name": "f1", "type": "long" } ] }
$ set schema={"type" : "record", "name" : "test", "fields": [ { "name": "f2", "type": "long" } ] }
$ set count=100000

# Create ten sources, fill each of them with data, and render a single dataflow
# that reads from all of them, just to stress the system.
$ kafka-create-topic topic=multi-topic-0
$ kafka-ingest format=avro topic=multi-topic-0 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-1
$ kafka-ingest format=avro topic=multi-topic-1 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-2
$ kafka-ingest format=avro topic=multi-topic-2 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-3
$ kafka-ingest format=avro topic=multi-topic-3 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-4
$ kafka-ingest format=avro topic=multi-topic-4 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-5
$ kafka-ingest format=avro topic=multi-topic-5 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-6
$ kafka-ingest format=avro topic=multi-topic-6 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-7
$ kafka-ingest format=avro topic=multi-topic-7 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-8
$ kafka-ingest format=avro topic=multi-topic-8 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-9
$ kafka-ingest format=avro topic=multi-topic-9 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }
> CREATE CONNECTION IF NOT EXISTS csr_conn TO CONFLUENT SCHEMA REGISTRY (
    URL '${testdrive.schema-registry-url}'
  );

> CREATE CONNECTION IF NOT EXISTS kafka_conn TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);
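
# Each source gets its own dedicated storage cluster, and a table on top of each
# source decodes the Avro records using the schema registry.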
> CREATE CLUSTER s0_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s0 IN CLUSTER s0_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-0-${testdrive.seed}');
> CREATE TABLE s0_tbl FROM SOURCE s0 (REFERENCE "testdrive-multi-topic-0-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s1_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s1 IN CLUSTER s1_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-1-${testdrive.seed}');
> CREATE TABLE s1_tbl FROM SOURCE s1 (REFERENCE "testdrive-multi-topic-1-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s2_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s2 IN CLUSTER s2_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-2-${testdrive.seed}');
> CREATE TABLE s2_tbl FROM SOURCE s2 (REFERENCE "testdrive-multi-topic-2-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s3_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s3 IN CLUSTER s3_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-3-${testdrive.seed}');
> CREATE TABLE s3_tbl FROM SOURCE s3 (REFERENCE "testdrive-multi-topic-3-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s4_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s4 IN CLUSTER s4_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-4-${testdrive.seed}');
> CREATE TABLE s4_tbl FROM SOURCE s4 (REFERENCE "testdrive-multi-topic-4-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s5_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s5 IN CLUSTER s5_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-5-${testdrive.seed}');
> CREATE TABLE s5_tbl FROM SOURCE s5 (REFERENCE "testdrive-multi-topic-5-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s6_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s6 IN CLUSTER s6_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-6-${testdrive.seed}');
> CREATE TABLE s6_tbl FROM SOURCE s6 (REFERENCE "testdrive-multi-topic-6-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s7_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s7 IN CLUSTER s7_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-7-${testdrive.seed}');
> CREATE TABLE s7_tbl FROM SOURCE s7 (REFERENCE "testdrive-multi-topic-7-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s8_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s8 IN CLUSTER s8_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-8-${testdrive.seed}');
> CREATE TABLE s8_tbl FROM SOURCE s8 (REFERENCE "testdrive-multi-topic-8-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s9_cluster SIZE '${arg.default-storage-size}';
> CREATE SOURCE s9 IN CLUSTER s9_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-9-${testdrive.seed}');
> CREATE TABLE s9_tbl FROM SOURCE s9 (REFERENCE "testdrive-multi-topic-9-${testdrive.seed}")
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;
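
# The materialized view reads from all ten source tables, so a single dataflow
# depends on every source.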
> CREATE MATERIALIZED VIEW v1 AS
  SELECT SUM(f1) AS f1 FROM
  (SELECT COUNT(*) AS f1 FROM s0_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s1_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s2_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s3_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s4_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s5_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s6_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s7_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s8_tbl
   UNION ALL SELECT COUNT(*) AS f1 FROM s9_tbl);

# Make sure that s1 has been fully timestamped
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

# Now disable retries and verify that we get the exact same result multiple times
# in a row. This test will not always catch the issue, since the original bug was
# nondeterministic, but it is a good best-effort smoke test.

$ set-max-tries max-tries=1
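
# Sleep briefly between the repeated reads so that each query runs at a later
# wall-clock time.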
> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>
> SELECT COUNT(*) FROM s1_tbl AS OF AT LEAST 0;
count
-----
100000