load-generator.td

# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default default-replica-size=1
$ set-arg-default single-replica-cluster=quickstart
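
# Enable the CLOCK load generator, which is gated behind a feature flag.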
$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET enable_clock_load_generator = true;
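
# COUNTER sources created with AS OF/UP TO bounds produce a fixed snapshot.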
> CREATE SOURCE counter_empty
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (AS OF 5, UP TO 5)

> SELECT count(*) FROM counter_empty
0

> CREATE SOURCE counter_single
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (AS OF 0, UP TO 1)

> SELECT count(*) FROM counter_single
1

> CREATE SOURCE counter_five
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (AS OF 4, UP TO 5)

> SELECT count(*) FROM counter_five
5
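
# Invalid COUNTER option combinations are rejected.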
! CREATE SOURCE counter
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (AS OF 5, UP TO 4)
contains:UP TO cannot be less than AS OF

! CREATE SOURCE counter
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (SCALE FACTOR 1)
exact:COUNTER load generators do not support SCALE FACTOR values

> DROP SOURCE counter_empty, counter_single, counter_five
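
# AUCTION is a multi-output load generator; create one with a fixed snapshot.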
> CREATE SOURCE auction_house
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR AUCTION (AS OF 300, UP TO 301);

$ skip-if
SELECT mz_version_num() < 14400;

# Error if trying to create with subsources
! CREATE SOURCE g FROM LOAD GENERATOR COUNTER FOR ALL TABLES;
contains:FOR ALL TABLES is only valid for multi-output sources

! CREATE SOURCE g FROM LOAD GENERATOR CLOCK FOR ALL TABLES;
contains:FOR ALL TABLES is only valid for multi-output sources

! CREATE SOURCE g FROM LOAD GENERATOR DATUMS FOR ALL TABLES;
contains:FOR ALL TABLES is only valid for multi-output sources

! CREATE SOURCE g FROM LOAD GENERATOR COUNTER FOR TABLES ("foo");
regex:.*FOR TABLES.*unsupported

! CREATE SOURCE g FROM LOAD GENERATOR CLOCK FOR TABLES ("foo");
regex:.*FOR TABLES.*unsupported

! CREATE SOURCE g FROM LOAD GENERATOR DATUMS FOR TABLES ("foo");
regex:.*FOR TABLES.*unsupported

! CREATE SOURCE g FROM LOAD GENERATOR COUNTER FOR SCHEMAS ("foo");
regex:.*FOR SCHEMAS.*unsupported

! CREATE SOURCE g FROM LOAD GENERATOR CLOCK FOR SCHEMAS ("foo");
regex:.*FOR SCHEMAS.*unsupported

! CREATE SOURCE g FROM LOAD GENERATOR DATUMS FOR SCHEMAS ("foo");
regex:.*FOR SCHEMAS.*unsupported

# skip-end
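
# Create a table for each of the auction house's outputs.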
> CREATE TABLE accounts FROM SOURCE auction_house (REFERENCE accounts);
> CREATE TABLE auctions FROM SOURCE auction_house (REFERENCE auctions);
> CREATE TABLE bids FROM SOURCE auction_house (REFERENCE bids);
> CREATE TABLE organizations FROM SOURCE auction_house (REFERENCE organizations);
> CREATE TABLE users FROM SOURCE auction_house (REFERENCE users);

> SHOW SOURCES
auction_house load-generator ${arg.single-replica-cluster} ""
auction_house_progress progress <null> ""

> SHOW TABLES
accounts ""
auctions ""
bids ""
organizations ""
users ""

> SELECT count(*) FROM bids
255
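
# A FOR TABLES reference that the source does not produce is rejected.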
! CREATE SOURCE auction_house
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR AUCTION FOR TABLES (user);
contains:reference to user not found in source
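
# The same source and tables can also be created in a separate schema.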
> CREATE SCHEMA another;

> CREATE SOURCE another.auction_house
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR AUCTION;

> CREATE TABLE another.accounts FROM SOURCE another.auction_house (REFERENCE accounts);
> CREATE TABLE another.auctions FROM SOURCE another.auction_house (REFERENCE auctions);
> CREATE TABLE another.bids FROM SOURCE another.auction_house (REFERENCE bids);
> CREATE TABLE another.organizations FROM SOURCE another.auction_house (REFERENCE organizations);
> CREATE TABLE another.users FROM SOURCE another.auction_house (REFERENCE users);

> SHOW SOURCES FROM another;
auction_house load-generator ${arg.single-replica-cluster} ""
auction_house_progress progress <null> ""

> SHOW TABLES FROM another;
accounts ""
auctions ""
bids ""
organizations ""
users ""
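
# Set up a Kafka connection for the sink below.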
> CREATE CONNECTION IF NOT EXISTS kafka_conn TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

# Validate that the ID column of the load generator data is usable as a key.
> CREATE SINK accounts_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM accounts
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-accounts-${testdrive.seed}')
  KEY (id)
  FORMAT JSON
  ENVELOPE UPSERT;

$ set-regex match="DETAILS = '[a-f0-9]+'" replacement=<DETAILS>

# CLOCK load generator source
> CREATE SOURCE clock
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR CLOCK (TICK INTERVAL '1s')

> SELECT count(*) FROM clock;
1

> SELECT time < now() + INTERVAL '5s', time > now() - INTERVAL '5s' FROM clock
true true

# Check that non-append-only `COUNTER` sources reach the proper size
> CREATE SOURCE counter
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (MAX CARDINALITY 8, TICK INTERVAL '0.001s')

> SELECT count(*) FROM counter
8

# Now make sure it doesn't change
> SELECT mz_unsafe.mz_sleep(1)
<null>

> SELECT count(*) FROM counter
8

# Check that negative max cardinalities are rejected
! CREATE SOURCE counter2
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (MAX CARDINALITY -1)
contains:invalid MAX CARDINALITY: invalid unsigned numeric value: invalid digit found in string
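
# A MAX CARDINALITY of 0 yields a source that stays empty.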
> CREATE SOURCE counter3
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (MAX CARDINALITY 0)

> SELECT count(*) FROM counter3
0

> SELECT mz_unsafe.mz_sleep(1)
<null>

> SELECT count(*) FROM counter3
0

# Check that negative tick intervals are rejected
! CREATE SOURCE counter4
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (TICK INTERVAL '-1s')
contains:invalid TICK INTERVAL: cannot convert negative interval to duration

# Check that out of range tick interval values are rejected
! CREATE SOURCE counter5
  IN CLUSTER ${arg.single-replica-cluster}
  FROM LOAD GENERATOR COUNTER (TICK INTERVAL '2147483647d')
contains: out of range integral type conversion

# Query automatically generated progress topic
$ set-regex match=\d+ replacement=<NUMBER>

> SELECT "offset" FROM another.auction_house_progress
<NUMBER>

# Ensure we report the write frontier of the progress subsource
$ set-regex match=(\s{12}0|\d{13,20}|u\d{1,5}|\(\d+-\d\d-\d\d\s\d\d:\d\d:\d\d\.\d\d\d\)|true|false) replacement=<>

> EXPLAIN TIMESTAMP FOR SELECT * FROM another.auction_house_progress
" query timestamp: <> <>\n oracle read timestamp: <> <>\nlargest not in advance of upper: <> <>\n upper:[<> <>]\n since:[<> <>]\n can respond immediately: <>\n timeline: Some(EpochMilliseconds)\n session wall time: <> <>\n\nsource materialize.another.auction_house_progress (<>, storage):\n read frontier:[<> <>]\n write frontier:[<> <>]\n\nbinding constraints:\nlower:\n (IsolationLevel(StrictSerializable)): [<> <>]\n"
> DROP SOURCE auction_house CASCADE
> DROP SOURCE another.auction_house CASCADE
> DROP SOURCE counter CASCADE
> DROP SOURCE counter3 CASCADE