# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default single-replica-cluster=quickstart

# Enable expression-valued LIMIT clauses (needed for the LIMIT 6 / abs(...) tests below).
$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET enable_expressions_in_limit_syntax TO true;

# Test monotonic top-k processing with k > 1.

$ set non-dbz-schema={
    "type": "record",
    "name": "cpx",
    "fields": [
      {"name": "a", "type": [ "null", "long" ]},
      {"name": "b", "type": "long"}
    ]
  }

$ kafka-create-topic topic=non-dbz-data

$ kafka-ingest format=avro topic=non-dbz-data schema=${non-dbz-schema} timestamp=1
{"a": {"long": 1}, "b": 1}
{"a": {"long": 1}, "b": 2}
{"a": {"long": 1}, "b": 3}
{"a": {"long": 1}, "b": 4}
{"a": {"long": 1}, "b": 5}
{"a": {"long": 2}, "b": 1000}
{"a": {"long": 2}, "b": 1001}
{"a": {"long": 2}, "b": 1002}
{"a": {"long": 2}, "b": 1003}
{"a": {"long": 2}, "b": 1004}
{"a": {"long": 3}, "b": 2000}
{"a": {"long": 3}, "b": 2000}
{"a": {"long": 4}, "b": 3001}

> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE SOURCE non_dbz_data
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-non-dbz-data-${testdrive.seed}')
  FORMAT AVRO USING SCHEMA '${non-dbz-schema}'
  ENVELOPE NONE

# Create a monotonic topk plan that has both a limit and a group to test that thinning works as expected
> SELECT * FROM (SELECT DISTINCT a FROM non_dbz_data) grp, LATERAL (SELECT b FROM non_dbz_data WHERE a = grp.a ORDER BY b LIMIT 2);
a b
---------
1 1
1 2
2 1000
2 1001
3 2000
3 2000
4 3001

# Create a topk plan that has a limit expression that will initially not fail, but
# then eventually fail after the addition of offending data to a monotonic input.
# We observe that a non-monotonic top-k plan can recover from this condition after
# the addition of even more data, but a monotonic top-k plan cannot.
> CREATE VIEW v_basic AS
  SELECT *
  FROM
    (SELECT sum(a) AS s FROM non_dbz_data GROUP BY a) grp,
    LATERAL (
        SELECT b FROM non_dbz_data
        WHERE (grp.s IS NULL AND a IS NULL) OR a = grp.s
        ORDER BY b LIMIT 6 / abs(grp.s+2)
    );

> CREATE VIEW v_monotonic AS
  SELECT *
  FROM
    (SELECT DISTINCT a::numeric FROM non_dbz_data) grp,
    LATERAL (
        SELECT b FROM non_dbz_data
        WHERE (grp.a IS NULL AND a IS NULL) OR a = grp.a
        ORDER BY b LIMIT 6 / abs(grp.a+2)
    );

> CREATE DEFAULT INDEX ON v_basic;

> CREATE DEFAULT INDEX ON v_monotonic;

> SELECT * FROM v_basic ORDER BY s;
s b
----
4 3001

> SELECT * FROM v_monotonic ORDER BY a;
a b
----
1 1
1 2
2 1000
2 1001
3 2000
4 3001

$ kafka-ingest format=avro topic=non-dbz-data schema=${non-dbz-schema} timestamp=2
{"a": {"long": 5}, "b": 4001}
{"a": {"long": 5}, "b": 4002}
{"a": null, "b": 0}
{"a": null, "b": 1}

> SELECT * FROM v_basic ORDER BY s;
s b
----
4 3001
5 4001
<null> 0
<null> 1

> SELECT * FROM v_monotonic ORDER BY a;
a b
----
1 1
1 2
2 1000
2 1001
3 2000
4 3001
5 4001
<null> 0
<null> 1

# Groups with a = -1 or a = -2 make abs(a+2) evaluate to 1 or 0, so the
# LIMIT expression now divides by zero for the a = -2 group.
$ kafka-ingest format=avro topic=non-dbz-data schema=${non-dbz-schema} timestamp=3
{"a": {"long": -1}, "b": -1}
{"a": {"long": -1}, "b": -2}
{"a": {"long": -2}, "b": -1001}

! SELECT * FROM v_basic ORDER BY s;
contains:division by zero

! SELECT * FROM v_monotonic ORDER BY a;
contains:division by zero

# More data shifts the per-group sums in v_basic so its LIMIT expressions no
# longer divide by zero; the monotonic plan keeps the error (no retractions).
$ kafka-ingest format=avro topic=non-dbz-data schema=${non-dbz-schema} timestamp=4
{"a": {"long": -1}, "b": -3}
{"a": {"long": -2}, "b": -1002}
{"a": {"long": -3}, "b": -2001}

> SELECT * FROM v_basic ORDER BY s;
s b
----
-3 -2001
-3 -2001
4 3001
5 4001
<null> 0
<null> 1

! SELECT * FROM v_monotonic ORDER BY a;
contains:division by zero

> DROP VIEW v_basic;

> DROP VIEW v_monotonic;

# The following tests repeat the scenario in database-issues#5442.

$ set other-non-dbz-schema={
    "type": "record",
    "name": "cpx",
    "fields": [
      {"name": "a", "type": "long" },
      {"name": "b", "type": "long"}
    ]
  }

$ kafka-create-topic topic=other-non-dbz-data

$ kafka-ingest format=avro topic=other-non-dbz-data schema=${other-non-dbz-schema} timestamp=1
{"a": 1, "b": 42}
{"a": 2, "b": 42}
{"a": 3, "b": 42}
{"a": 4, "b": 42}
{"a": 5, "b": 42}
{"a": 6, "b": 42}
{"a": 7, "b": 42}
{"a": 8, "b": 42}
{"a": 9, "b": 42}
{"a": 10, "b": 42}
{"a": 11, "b": 42}
{"a": 12, "b": 42}
{"a": 13, "b": 42}
{"a": 14, "b": 42}
{"a": 15, "b": 42}
{"a": 16, "b": 42}
{"a": 17, "b": 42}
{"a": 18, "b": 42}
{"a": 19, "b": 42}
{"a": 20, "b": 42}
{"a": 21, "b": 42}
{"a": 22, "b": 42}
{"a": 23, "b": 42}
{"a": 24, "b": 42}
{"a": 25, "b": 42}
{"a": 26, "b": 42}
{"a": 27, "b": 42}
{"a": 28, "b": 42}
{"a": 29, "b": 42}
{"a": 30, "b": 42}
{"a": 31, "b": 42}
{"a": 32, "b": 42}
{"a": 33, "b": 42}
{"a": 34, "b": 42}
{"a": 35, "b": 42}
{"a": 36, "b": 42}
{"a": 37, "b": 42}
{"a": 38, "b": 42}
{"a": 39, "b": 42}

> CREATE SOURCE other_non_dbz_data
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-other-non-dbz-data-${testdrive.seed}')
  FORMAT AVRO USING SCHEMA '${other-non-dbz-schema}'
  ENVELOPE NONE

> SELECT sum(a) FROM (SELECT a FROM other_non_dbz_data ORDER BY b LIMIT 37);
sum
----
703

> CREATE VIEW v_other AS
  SELECT a FROM other_non_dbz_data ORDER BY b LIMIT 37;

> CREATE DEFAULT INDEX ON v_other;

> SELECT * FROM v_other;
a
----
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37

# Check arrangements, seeing new arrangements can mean a significant increase
# in memory consumptions and should be understood before adapting the values.
> SET cluster_replica = r1

>[version>=15000] SELECT mdod.dataflow_name, mdod.name
  FROM mz_introspection.mz_arrangement_sharing mash
  JOIN mz_introspection.mz_dataflow_operator_dataflows mdod ON mash.operator_id = mdod.id
  JOIN mz_introspection.mz_compute_exports USING (dataflow_id)
  WHERE export_id LIKE 'u%'
"Dataflow: materialize.public.v_other_primary_idx" "ArrangeBy[[Column(0, \"a\")]]"
"Dataflow: materialize.public.v_other_primary_idx" "ArrangeBy[[Column(0, \"a\")]]-errors"
"Dataflow: materialize.public.v_other_primary_idx" "Arranged TopK input"
"Dataflow: materialize.public.v_other_primary_idx" "Reduced TopK input"