# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default single-replica-cluster=quickstart
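
# Lower the minimum allowed timestamp interval so that the 90ms and 100ms
# intervals used by the sources below are permitted.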
$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET min_timestamp_interval = '90ms'

#
# Make sure that all materialized views dealing with the same source
# see the same records at the same timestamp.
#
# As per Ruchir's idea, this is tested by running queries of the form
#   SELECT some_view EXCEPT ALL SELECT some_other_view
# and making sure they return no rows (see the example at the end of
# this comment).
#
# Unfortunately, this failed to reproduce the original problem, so the
# test is being pushed here in the hope that it will end up being
# useful in the future.
#
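# As an illustration of the pattern (the view names here are placeholders),
# a check of the form
#
#   CREATE MATERIALIZED VIEW check AS
#     SELECT a FROM view_x EXCEPT ALL SELECT a FROM view_y;
#
# should stay empty for as long as both inputs show the same data at every
# timestamp.
#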

$ set schema={
    "type": "record",
    "name": "envelope",
    "fields": [
      {"name": "a", "type": "long"}
    ]
  }

$ kafka-create-topic topic=data partitions=16

$ kafka-ingest format=avro topic=data partition=0 schema=${schema}
{"a": 10}
{"a": 5}

$ kafka-ingest format=avro topic=data partition=1 schema=${schema}
{"a": 1}
{"a": 7}
{"a": 6}

$ kafka-ingest format=avro topic=data partition=2 schema=${schema}
{"a": 10}
{"a": 1}
{"a": 7}
{"a": 8}

$ kafka-ingest format=avro topic=data partition=3 schema=${schema}
{"a": 6}

> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);
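
# Two sources over the same topic, with deliberately different timestamp
# intervals (90ms vs. 100ms).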
> CREATE SOURCE direct_source1
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-data-${testdrive.seed}')
  FORMAT AVRO USING SCHEMA '${schema}'
  WITH (TIMESTAMP INTERVAL '90ms');

> CREATE SOURCE direct_source2
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-data-${testdrive.seed}')
  FORMAT AVRO USING SCHEMA '${schema}'
  WITH (TIMESTAMP INTERVAL '100ms');

$ kafka-ingest format=avro topic=data partition=4 schema=${schema}
{"a": 5}

$ kafka-ingest format=avro topic=data partition=5 schema=${schema}
{"a": 1}
{"a": 10}
{"a": 2}
{"a": 5}

$ kafka-ingest format=avro topic=data partition=6 schema=${schema}
{"a": 9}
{"a": 4}

$ kafka-ingest format=avro topic=data partition=7 schema=${schema}
{"a": 5}
{"a": 8}
{"a": 1}
> CREATE MATERIALIZED VIEW direct_view1a AS SELECT a + 0 AS a FROM direct_source1;

# Sleep so that the views are not all created at the same time
> SELECT mz_unsafe.mz_sleep(2);
<null>

> CREATE MATERIALIZED VIEW direct_view1b AS SELECT a + 0 AS a FROM direct_source1;

> CREATE MATERIALIZED VIEW derived_view1a AS SELECT a + 0 AS a FROM direct_view1a;

> SELECT mz_unsafe.mz_sleep(2);
<null>

> CREATE MATERIALIZED VIEW derived_view1b AS SELECT a + 0 AS a FROM direct_view1b;

> CREATE MATERIALIZED VIEW join_view1 AS SELECT a1.a + 0 AS a FROM direct_source1 AS a1, direct_view1a;

> CREATE MATERIALIZED VIEW join_view2 AS SELECT a1.a + 0 AS a FROM direct_view1a AS a1, direct_view1b;

> CREATE MATERIALIZED VIEW join_view3 AS SELECT a2.a + 0 AS a FROM direct_view1a, derived_view1b AS a2;

$ kafka-ingest format=avro topic=data partition=8 schema=${schema}
{"a": 2}
{"a": 8}
{"a": 5}
{"a": 9}

$ kafka-ingest format=avro topic=data partition=9 schema=${schema}
{"a": 6}

$ kafka-ingest format=avro topic=data partition=10 schema=${schema}
{"a": 4}

$ kafka-ingest format=avro topic=data partition=11 schema=${schema}
{"a": 7}
{"a": 2}
{"a": 10}
{"a": 1}
> CREATE MATERIALIZED VIEW check_v1 AS SELECT a + 0 AS a FROM direct_source1 EXCEPT ALL SELECT a - 0 AS a FROM direct_source1;

> CREATE MATERIALIZED VIEW check_v2 AS SELECT a + 0 AS a FROM direct_view1a EXCEPT ALL SELECT a - 0 AS a FROM direct_view1a;

> CREATE MATERIALIZED VIEW check_v3 AS SELECT a + 0 AS a FROM direct_view1a EXCEPT ALL SELECT a - 0 AS a FROM direct_view1b;

> CREATE MATERIALIZED VIEW check_v4 AS SELECT a + 0 AS a FROM derived_view1a EXCEPT ALL SELECT a - 0 AS a FROM derived_view1a;

> CREATE MATERIALIZED VIEW check_v5 AS SELECT a + 0 AS a FROM derived_view1a EXCEPT ALL SELECT a - 0 AS a FROM derived_view1b;

> CREATE MATERIALIZED VIEW check_v6 AS SELECT a + 0 AS a FROM join_view1 EXCEPT ALL SELECT a - 0 AS a FROM join_view2;

> CREATE MATERIALIZED VIEW check_v7 AS SELECT a + 0 AS a FROM join_view2 EXCEPT ALL SELECT a - 0 AS a FROM join_view3;
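
# Ingest more records now that the check views exist, so they also see data
# that arrives after their creation.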
$ kafka-ingest format=avro topic=data partition=12 schema=${schema}
{"a": 31}
{"a": 32}

$ kafka-ingest format=avro topic=data partition=13 schema=${schema}
{"a": 33}
{"a": 34}
{"a": 35}

$ kafka-ingest format=avro topic=data partition=14 schema=${schema}
{"a": 36}
{"a": 37}

$ kafka-ingest format=avro topic=data partition=15 schema=${schema}
{"a": 38}
{"a": 39}
{"a": 40}

# Make sure that none of the 'EXCEPT ALL' views above has ever produced any records.
# Pin the session to replica r1 in case the environment has other replicas.
> SET cluster_replica = r1

> SELECT COUNT(*) FROM mz_introspection.mz_records_per_dataflow WHERE name LIKE '%check_v%' AND records > 0;
0