# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default single-replica-cluster=quickstart

#
# Make sure that data that was ingested during the lifetime of a SUBSCRIBE cursor can be FETCH-ed
#

# Allow sub-second source timestamp intervals so ingested rows become visible quickly.
$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET min_timestamp_interval = '100ms'

# SUBSCRIBE output begins with a 13-digit millisecond timestamp; normalize it
# so expected rows below can be matched deterministically.
$ set-regex match=\d{13} replacement=<TIMESTAMP>

$ set int={"type": "record", "name": "field_int", "fields": [ {"name": "f1", "type": "int"} ] }

$ kafka-create-topic topic=tail-fetch-during-ingest

> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE SOURCE fetch_during_ingest
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-tail-fetch-during-ingest-${testdrive.seed}')
  FORMAT AVRO USING SCHEMA '${int}'
  ENVELOPE NONE
  WITH (TIMESTAMP INTERVAL '100ms')

# Open the cursor before any data is ingested.
> BEGIN

> DECLARE c CURSOR FOR SUBSCRIBE fetch_during_ingest;

$ kafka-ingest format=avro topic=tail-fetch-during-ingest schema=${int} timestamp=1
{"f1": 123}

> FETCH 1 c WITH (timeout='60s');
<TIMESTAMP> 1 123

$ kafka-ingest format=avro topic=tail-fetch-during-ingest schema=${int} timestamp=2
{"f1": 234}

# The row just inserted is ours to fetch
> FETCH 1 c WITH (timeout='60s');
<TIMESTAMP> 1 234