copy-to-s3.td

# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

# Tests for COPY TO expr.
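# COPY ... TO <expr> is gated behind a feature flag, so enable it over the internal SQL port first.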
$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET enable_copy_to_expr = true;

# Prepare table data
> CREATE TABLE t (a int);
> INSERT INTO t VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (0);
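
# Store the AWS credentials in a secret and reference them from an AWS connection.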
> CREATE SECRET aws_secret_access_key AS '${arg.secret-key}';

> CREATE CONNECTION aws_conn
  TO AWS (
    ACCESS KEY ID = '${arg.access-key}',
    SECRET ACCESS KEY = SECRET aws_secret_access_key,
    ENDPOINT = '${arg.endpoint}',
    REGION = '${arg.region}'
  );
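
# Error cases: each of the following COPY statements is expected to fail with the given message.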
! COPY t TO 's3://path/to/dir' WITH (FORMAT = 'csv');
contains:AWS CONNECTION is required for COPY ... TO <expr>

! COPY t TO 's3://path/to/dir';
contains:COPY TO <expr> requires a FORMAT option

! COPY t TO 's3://path/to/dir'
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'text'
  );
contains:FORMAT TEXT not yet supported

! COPY t TO 's3://path/to/dir'
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'binary'
  );
contains:FORMAT BINARY not yet supported

! COPY t TO '/path/'
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'csv'
  );
contains:only 's3://...' urls are supported as COPY TO target

! COPY t TO NULL
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'csv'
  );
contains:COPY TO target value can not be null

! COPY t TO 1234
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'csv'
  );
contains:COPY TO target must have type text, not type integer

! COPY (SELECT * FROM t ORDER BY 1) TO NULL
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'csv'
  );
contains:ORDER BY is not supported in SELECT query for COPY statements

! COPY t TO 's3://path/to/dir'
  WITH (
    AWS CONNECTION = aws_conn,
    FORMAT = 'csv',
    MAX FILE SIZE = '1kB'
  );
contains:MAX FILE SIZE cannot be less than 16MB
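
# The remaining COPY statements should succeed, writing CSV output under s3://${arg.s3-prefix}/.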

# Create a cluster with multiple replicas, each with multiple workers
> CREATE CLUSTER c1 REPLICAS (r1 (size '2'), r2 (size '1'));
> SET cluster = c1;

# Functions like now() should work in the S3 path.
> COPY t TO 's3://${arg.s3-prefix}/1/' || TO_CHAR(now(), 'YYYY-MM-DD')
  WITH (
    AWS CONNECTION = aws_conn,
    MAX FILE SIZE = "100MB",
    FORMAT = 'csv'
  );
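
# Sanity-check the table contents.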
> SELECT a FROM t
0
1
2
3
4
5
6
7
8
9
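
# COPY also accepts arbitrary SELECT queries as the source, including constants and large result sets.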
> COPY (SELECT a FROM t) TO 's3://${arg.s3-prefix}/2/'
  WITH (
    AWS CONNECTION = aws_conn,
    MAX FILE SIZE = "100MB",
    FORMAT = 'csv'
  );

> COPY (SELECT 1000) TO 's3://${arg.s3-prefix}/3/'
  WITH (
    AWS CONNECTION = aws_conn,
    MAX FILE SIZE = "100MB",
    FORMAT = 'csv'
  );

> COPY (SELECT generate_series(1, 1000000)) TO 's3://${arg.s3-prefix}/4/'
  WITH (
    AWS CONNECTION = aws_conn,
    MAX FILE SIZE = "100MB",
    FORMAT = 'csv'
  );
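
# Clean up.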
> DROP CLUSTER c1 CASCADE;