# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

$ set-arg-default single-replica-cluster=quickstart

# This test exercises dates and times at the boundary (e.g., by sending them
# through pgwire). Operations on dates and times are more thoroughly tested in
# dates-times.slt.
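
# The Avro schema below uses the date, timestamp-millis, and timestamp-micros
# logical types so that each of them is exercised end to end.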

$ set schema={
    "name": "row",
    "type": "record",
    "fields": [
      {
        "name": "d",
        "type": {
          "type": "int",
          "logicalType": "date"
        }
      },
      {
        "name": "ts_millis",
        "type": {
          "type": "long",
          "logicalType": "timestamp-millis"
        }
      },
      {
        "name": "ts_micros",
        "type": {
          "type": "long",
          "logicalType": "timestamp-micros"
        }
      }
    ]
  }

$ kafka-create-topic topic=data
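
# d=18135 is 18135 days after the Unix epoch, i.e. 2019-08-27. The same long
# value is ingested for both timestamp fields: it is read as milliseconds for
# ts_millis (landing in the year 51621) and as microseconds for ts_micros
# (2019-08-27), as the SELECT below confirms.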

$ kafka-ingest format=avro topic=data schema=${schema} timestamp=1
{"d": 18135, "ts_millis": 1566865029000000, "ts_micros": 1566865029000000}
{"d": 0, "ts_millis": 65000000, "ts_micros": 65000000}

> CREATE CONNECTION kafka_conn
  TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT);

> CREATE CONNECTION csr_conn TO CONFLUENT SCHEMA REGISTRY (
    URL '${testdrive.schema-registry-url}'
  );

> CREATE SOURCE data
  IN CLUSTER ${arg.single-replica-cluster}
  FROM KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-data-${testdrive.seed}')

> CREATE TABLE data_tbl FROM SOURCE data (REFERENCE "testdrive-data-${testdrive.seed}")
  FORMAT AVRO USING SCHEMA '${schema}'

> CREATE MATERIALIZED VIEW data_view AS SELECT * FROM data_tbl

> SELECT * FROM data_view
1970-01-01 "1970-01-01 18:03:20" "1970-01-01 00:01:05"
2019-08-27 "+51621-12-22 21:50:00" "2019-08-27 00:17:09"
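
# Round-trip the data through an Avro sink and check that the date and
# timestamp logical types are preserved in the published schema and in the
# emitted records.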

> CREATE SINK data_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM data_tbl
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-data-roundtrip-${testdrive.seed}')
  KEY (d) NOT ENFORCED
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE UPSERT

$ schema-registry-verify schema-type=avro subject=testdrive-data-roundtrip-${testdrive.seed}-value
{"type":"record","name":"envelope","fields":[{"name":"d","type":{"type":"int","logicalType":"date"}},{"name":"ts_millis","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"ts_micros","type":{"type":"long","logicalType":"timestamp-micros"}}]}

$ kafka-verify-data format=avro sink=materialize.public.data_sink sort-messages=true
{"d": 0} {"d": 0, "ts_millis": 65000000, "ts_micros": 65000000}
{"d": 18135} {"d": 18135, "ts_millis": 1566865029000000, "ts_micros": 1566865029000000}
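
# timestamp and timestamptz columns with an explicit precision of 0-3 map to
# the Avro timestamp-millis logical type, while precision 4-6 (and the default,
# unconstrained precision) map to timestamp-micros, as the schema registered
# for the sink below shows.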

> CREATE MATERIALIZED VIEW ts_precision AS
  SELECT
    TRUE AS key,
    '1970-01-01T00:00:00.123456'::timestamp AS ts,
    '1970-01-01T00:00:00.123456'::timestamp(0) AS ts0,
    '1970-01-01T00:00:00.123456'::timestamp(1) AS ts1,
    '1970-01-01T00:00:00.123456'::timestamp(2) AS ts2,
    '1970-01-01T00:00:00.123456'::timestamp(3) AS ts3,
    '1970-01-01T00:00:00.123456'::timestamp(4) AS ts4,
    '1970-01-01T00:00:00.123456'::timestamp(5) AS ts5,
    '1970-01-01T00:00:00.123456'::timestamp(6) AS ts6,
    '1970-01-01T00:00:00.123456'::timestamptz AS tstz,
    '1970-01-01T00:00:00.123456'::timestamptz(0) AS tstz0,
    '1970-01-01T00:00:00.123456'::timestamptz(1) AS tstz1,
    '1970-01-01T00:00:00.123456'::timestamptz(2) AS tstz2,
    '1970-01-01T00:00:00.123456'::timestamptz(3) AS tstz3,
    '1970-01-01T00:00:00.123456'::timestamptz(4) AS tstz4,
    '1970-01-01T00:00:00.123456'::timestamptz(5) AS tstz5,
    '1970-01-01T00:00:00.123456'::timestamptz(6) AS tstz6

> CREATE SINK ts_precision_sink
  IN CLUSTER ${arg.single-replica-cluster}
  FROM ts_precision
  INTO KAFKA CONNECTION kafka_conn (TOPIC 'testdrive-ts-precision-${testdrive.seed}')
  KEY (key) NOT ENFORCED
  FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_conn
  ENVELOPE UPSERT

$ schema-registry-verify schema-type=avro subject=testdrive-ts-precision-${testdrive.seed}-value
{"type":"record","name":"envelope","fields":[{"name":"key","type":"boolean"},{"name":"ts","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"ts0","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"ts1","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"ts2","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"ts3","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"ts4","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"ts5","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"ts6","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"tstz","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"tstz0","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"tstz1","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"tstz2","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"tstz3","type":{"type":"long","logicalType":"timestamp-millis"}},{"name":"tstz4","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"tstz5","type":{"type":"long","logicalType":"timestamp-micros"}},{"name":"tstz6","type":{"type":"long","logicalType":"timestamp-micros"}}]}

$ kafka-verify-data format=avro sink=materialize.public.ts_precision_sink sort-messages=true
{"key": true} {"key": true, "ts": 123456, "ts0": 0, "ts1": 100, "ts2": 120, "ts3": 123, "ts4": 123500, "ts5": 123460, "ts6": 123456, "tstz": 123456, "tstz0": 0, "tstz1": 100, "tstz2": 120, "tstz3": 123, "tstz4": 123500, "tstz5": 123460, "tstz6": 123456}
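
# Precision outside the supported range of 0 through 6 is rejected.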

! SELECT '1970-01-01T00:00:00.123456'::timestamp(-1)
contains:precision for type timestamp or timestamptz must be between 0 and 6

! SELECT '1970-01-01T00:00:00.123456'::timestamp(7)
contains:precision for type timestamp or timestamptz must be between 0 and 6
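
# Single-unit interval literals.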

> SELECT INTERVAL '1' SECOND
"00:00:01"

> SELECT INTERVAL '2' MINUTE
"00:02:00"

> SELECT INTERVAL '3' HOUR
"03:00:00"

> SELECT INTERVAL '1' DAY
"1 day"

> SELECT INTERVAL '6' MONTH
"6 months"

> SELECT INTERVAL '10' YEAR
"10 years"
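
# Timestamps with a time zone offset are normalized to UTC.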

> SELECT TIMESTAMP WITH TIME ZONE '1989-06-01 10:10:10.410+04:00'
"1989-06-01 06:10:10.410 UTC"

> SELECT TIMESTAMPTZ '1989-06-01 9:10:10.410+07:00'
"1989-06-01 02:10:10.410 UTC"

# microseconds are returned with nanosecond precision in the binary format
> SELECT TIMESTAMPTZ '1989-06-01 06:10:10.12345678+00:00'
"1989-06-01 06:10:10.123457 UTC"

> SELECT TIMESTAMP WITHOUT TIME ZONE '1989-06-01 10:10:10.410+04:00'
"1989-06-01 10:10:10.410"

> SELECT TIMESTAMP '1989-06-01 10:10:10.410+04:00'
"1989-06-01 10:10:10.410"

> SELECT TIMESTAMPTZ '1989-06-01 9:10:10.410+7'
"1989-06-01 02:10:10.410 UTC"

> SELECT TIMESTAMPTZ '1989-06-01 9:10:10.410+700'
"1989-06-01 02:10:10.410 UTC"
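
# Casting to text drops trailing zeros from the fractional seconds
# (.410 renders as .41, while .413 is kept in full).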

> SELECT '1989-06-01 10:10:10.410+04:00'::timestamptz::text
"1989-06-01 06:10:10.41+00"

> SELECT '1989-06-01 10:10:10.413+04:00'::timestamptz::text
"1989-06-01 06:10:10.413+00"

# The text format should only ever return microseconds
> SELECT '1989-06-01 10:10:10.12345678+04:00'::timestamptz::text
"1989-06-01 06:10:10.123457+00"

> SELECT '1989-06-01 10:10:10.1234564+04:00'::timestamptz::text
"1989-06-01 06:10:10.123456+00"

# Timestamp columns with precisions
> DROP TABLE IF EXISTS temporal_types;

> CREATE TABLE temporal_types (timestamp_col TIMESTAMP, timestamptz_col TIMESTAMPTZ, timestamp_prec_col TIMESTAMP(3), timestamptz_prec_col TIMESTAMPTZ(1));

> INSERT INTO temporal_types VALUES ('2010-10-10 10:10:10.123456789+00', '2010-10-10 10:10:10.123456789+00', '2010-10-10 10:10:10.123456789+00', '2010-10-10 10:10:10.123456789+00');
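
# The inserted value is rounded to each column's precision: microseconds for
# the unconstrained columns, milliseconds for TIMESTAMP(3), and tenths of a
# second for TIMESTAMPTZ(1).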

> SELECT * FROM temporal_types;
"2010-10-10 10:10:10.123457" "2010-10-10 10:10:10.123457 UTC" "2010-10-10 10:10:10.123" "2010-10-10 10:10:10.100 UTC"