# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

# Tests for COPY FROM expr.

# COPY FROM expressions should immediately succeed or fail on their first
# attempt, so disable testdrive's automatic retries.
$ set-max-tries max-tries=1

$ postgres-execute connection=postgres://mz_system:materialize@${testdrive.materialize-internal-sql-addr}
ALTER SYSTEM SET enable_copy_from_remote = true;
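
# The enable_copy_from_remote flag gates the COPY ... FROM '<url>' form
# exercised throughout this file.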

# Prepare the table we want to COPY INTO.
> CREATE TABLE t1 (a text, b text);

$ s3-file-upload bucket=copytos3 key=csv/1.csv repeat=2
none,100

$ s3-set-presigned-url bucket=copytos3 key=csv/1.csv var-name=1_csv_url
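# The presigned URL is stored in the 1_csv_url variable and interpolated
# into the COPY statement below.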

> COPY INTO t1 FROM '${1_csv_url}' (FORMAT CSV);

> SELECT * FROM t1;
none 100
none 100
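
# COPY appends to the target table, so each subsequent SELECT returns the
# accumulated rows from every COPY so far.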

# gzip compression.
$ s3-file-upload bucket=copytos3 key=csv/2.csv.gz repeat=2 compression=gzip
gzip,200

$ s3-set-presigned-url bucket=copytos3 key=csv/2.csv.gz var-name=2_csv_url

> COPY INTO t1 FROM '${2_csv_url}' (FORMAT CSV);

> SELECT * FROM t1;
gzip 200
gzip 200
none 100
none 100
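
# The compressed object is decoded transparently; presumably the codec is
# inferred from the file extension. The same applies to the bzip2, xz, and
# zstd cases below.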

# bzip2 compression.
$ s3-file-upload bucket=copytos3 key=csv/3.csv.bz2 repeat=2 compression=bzip2
bzip2,300

$ s3-set-presigned-url bucket=copytos3 key=csv/3.csv.bz2 var-name=3_csv_url

> COPY INTO t1 FROM '${3_csv_url}' (FORMAT CSV);

> SELECT * FROM t1;
bzip2 300
bzip2 300
gzip 200
gzip 200
none 100
none 100

# xz compression.
$ s3-file-upload bucket=copytos3 key=csv/4.csv.xz repeat=2 compression=xz
xz,400

$ s3-set-presigned-url bucket=copytos3 key=csv/4.csv.xz var-name=4_csv_url

> COPY INTO t1 FROM '${4_csv_url}' (FORMAT CSV);

> SELECT * FROM t1;
bzip2 300
bzip2 300
gzip 200
gzip 200
none 100
none 100
xz 400
xz 400

# zstd compression.
$ s3-file-upload bucket=copytos3 key=csv/5.csv.zst repeat=2 compression=zstd
zstd,500

$ s3-set-presigned-url bucket=copytos3 key=csv/5.csv.zst var-name=5_csv_url

> COPY INTO t1 FROM '${5_csv_url}' (FORMAT CSV);

> SELECT * FROM t1;
bzip2 300
bzip2 300
gzip 200
gzip 200
none 100
none 100
xz 400
xz 400
zstd 500
zstd 500

# Map and Project.
> CREATE TABLE t2 (a text DEFAULT 'hello', b text, c text);

$ s3-file-upload bucket=copytos3 key=default_vals.csv repeat=5
world

$ s3-set-presigned-url bucket=copytos3 key=default_vals.csv var-name=default_vals_csv_url

> COPY INTO t2 (b) FROM '${default_vals_csv_url}' (FORMAT CSV);

> SELECT * FROM t2;
hello world <null>
hello world <null>
hello world <null>
hello world <null>
hello world <null>
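
# Columns omitted from the COPY column list are filled in with their
# declared default ('hello' for a) or NULL (for c).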

# Test the AWS source.
> CREATE SECRET aws_secret AS '${arg.aws-secret-access-key}'

> CREATE CONNECTION aws_conn
  TO AWS (
    ACCESS KEY ID = '${arg.aws-access-key-id}',
    SECRET ACCESS KEY = SECRET aws_secret,
    ENDPOINT = '${arg.aws-endpoint}',
    REGION = 'us-east-1'
  );
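
# aws_conn supplies the credentials for reading directly from s3:// URLs in
# the statements below, rather than going through a presigned URL.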

# Test glob patterns.
> CREATE TABLE t3 (a text, b text);

> COPY INTO t3 FROM 's3://copytos3' (FORMAT CSV, AWS CONNECTION = aws_conn, PATTERN = "csv/**");

> SELECT * FROM t3;
bzip2 300
bzip2 300
gzip 200
gzip 200
none 100
none 100
xz 400
xz 400
zstd 500
zstd 500
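
# The csv/** pattern matches every object under the csv/ prefix, so t3 ends
# up with the same ten rows copied into t1 above; default_vals.csv sits at
# the bucket root and is therefore excluded.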

# Test explicit file lists.
> CREATE TABLE t4 (a text, b text);

> COPY INTO t4 FROM 's3://copytos3/csv' (FORMAT CSV, AWS CONNECTION = aws_conn, FILES = ["csv/1.csv"]);

> SELECT * FROM t4;
none 100
none 100
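
# Only the single listed file is copied, even though other objects exist
# under the same prefix.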