# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default default-storage-size=1

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET max_clusters = 20

# Regression test for https://github.com/MaterializeInc/database-issues/issues/3135
#
# This file uses the old create source syntax.

$ set-sql-timeout duration=300s

$ set keyschema={"type": "record", "name": "Key", "fields": [ { "name": "f1", "type": "long" } ] }

$ set schema={"type" : "record", "name" : "test", "fields": [ { "name": "f2", "type": "long" } ] }

$ set count=100000

# Create sources and fill them with data and render one dataflow that uses everything just to
# stress the system.

$ kafka-create-topic topic=multi-topic-0

$ kafka-ingest format=avro topic=multi-topic-0 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-1

$ kafka-ingest format=avro topic=multi-topic-1 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-2

$ kafka-ingest format=avro topic=multi-topic-2 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-3

$ kafka-ingest format=avro topic=multi-topic-3 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-4

$ kafka-ingest format=avro topic=multi-topic-4 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-5

$ kafka-ingest format=avro topic=multi-topic-5 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-6

$ kafka-ingest format=avro topic=multi-topic-6 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-7

$ kafka-ingest format=avro topic=multi-topic-7 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-8

$ kafka-ingest format=avro topic=multi-topic-8 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }

$ kafka-create-topic topic=multi-topic-9

$ kafka-ingest format=avro topic=multi-topic-9 schema=${schema} repeat=${count}
{"f2": ${kafka-ingest.iteration} }
> CREATE CONNECTION IF NOT EXISTS csr_conn TO CONFLUENT SCHEMA REGISTRY (
    URL '${testdrive.schema-registry-url}'
  );

> CREATE CONNECTION IF NOT EXISTS kafka_conn TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE CLUSTER s0_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s0 IN CLUSTER s0_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-0-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s1_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s1 IN CLUSTER s1_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-1-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s2_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s2 IN CLUSTER s2_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-2-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s3_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s3 IN CLUSTER s3_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-3-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s4_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s4 IN CLUSTER s4_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-4-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s5_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s5 IN CLUSTER s5_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-5-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s6_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s6 IN CLUSTER s6_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-6-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s7_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s7 IN CLUSTER s7_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-7-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s8_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s8 IN CLUSTER s8_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-8-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;

> CREATE CLUSTER s9_cluster SIZE '${arg.default-storage-size}';

> CREATE SOURCE s9 IN CLUSTER s9_cluster
  FROM KAFKA CONNECTION kafka_conn
  (TOPIC 'testdrive-multi-topic-9-${testdrive.seed}')
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn ENVELOPE NONE;
> CREATE MATERIALIZED VIEW v1 AS
  SELECT SUM(f1) AS f1 FROM
  (SELECT COUNT(*) AS f1 FROM s0
   UNION ALL SELECT COUNT(*) AS f1 FROM s1
   UNION ALL SELECT COUNT(*) AS f1 FROM s2
   UNION ALL SELECT COUNT(*) AS f1 FROM s3
   UNION ALL SELECT COUNT(*) AS f1 FROM s4
   UNION ALL SELECT COUNT(*) AS f1 FROM s5
   UNION ALL SELECT COUNT(*) AS f1 FROM s6
   UNION ALL SELECT COUNT(*) AS f1 FROM s7
   UNION ALL SELECT COUNT(*) AS f1 FROM s8
   UNION ALL SELECT COUNT(*) AS f1 FROM s9);

# Make sure that s1 has been fully timestamped
> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000
# Now disable retries, and verify that we get the exact same result multiple times
# in a row. Obviously, this test will not always catch the issue since the original
# bug was nondeterministic, but this is a good best-effort smoke test.

$ set-max-tries max-tries=1
> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000

> SELECT mz_unsafe.mz_sleep(0.2);
<null>

> SELECT COUNT(*) FROM s1 AS OF AT LEAST 0;
count
-----
100000