Fix flaky postgres test (#7228)
* Fix flaky postgres test * add pgcrypto extension to test * fix postgres ref link CONTRIBUTING guide and add pgcrypto extension * Update CONTRIBUTING.md
This commit is contained in:
@@ -112,7 +112,7 @@ Once you have babel running in watch mode, you can start making changes to parse
|
|||||||
|
|
||||||
If your pull request introduces a change that may affect the storage or retrieval of objects, you may want to make sure it plays nice with Postgres.
|
If your pull request introduces a change that may affect the storage or retrieval of objects, you may want to make sure it plays nice with Postgres.
|
||||||
|
|
||||||
* Run the tests against the postgres database with `PARSE_SERVER_TEST_DB=postgres PARSE_SERVER_TEST_DATABASE_URI=postgres://postgres:password@localhost:5432/parse_server_postgres_adapter_test_database npm run testonly`. You'll need to have postgres running on your machine and setup [appropriately](https://github.com/parse-community/parse-server/blob/master/.travis.yml#L43) or use [`Docker`](#run-a-parse-postgres-with-docker).
|
* Run the tests against the postgres database with `PARSE_SERVER_TEST_DB=postgres PARSE_SERVER_TEST_DATABASE_URI=postgres://postgres:password@localhost:5432/parse_server_postgres_adapter_test_database npm run testonly`. You'll need to have postgres running on your machine and set it up [appropriately](https://github.com/parse-community/parse-server/blob/master/scripts/before_script_postgres.sh) or use [`Docker`](#run-a-parse-postgres-with-docker).
|
||||||
* The Postgres adapter has a special debugger that traces all the sql commands. You can enable it with setting the environment variable `PARSE_SERVER_LOG_LEVEL=debug`
|
* The Postgres adapter has a special debugger that traces all the SQL commands. You can enable it by setting the environment variable `PARSE_SERVER_LOG_LEVEL=debug`.
|
||||||
* If your feature is intended to only work with MongoDB, you should disable PostgreSQL-specific tests with:
|
* If your feature is intended to only work with MongoDB, you should disable PostgreSQL-specific tests with:
|
||||||
|
|
||||||
@@ -135,7 +135,7 @@ If your pull request introduces a change that may affect the storage or retrieva
|
|||||||
[PostGIS images (select one with v2.2 or higher) on docker dashboard](https://hub.docker.com/r/postgis/postgis) is based off of the official [postgres](https://registry.hub.docker.com/_/postgres/) image and will work out-of-the-box (as long as you create a user with the necessary extensions for each of your Parse databases; see below). To launch the compatible Postgres instance, copy and paste the following line into your shell:
|
[PostGIS images (select one with v2.2 or higher) on docker dashboard](https://hub.docker.com/r/postgis/postgis) is based on the official [postgres](https://registry.hub.docker.com/_/postgres/) image and will work out-of-the-box (as long as you create a user with the necessary extensions for each of your Parse databases; see below). To launch the compatible Postgres instance, copy and paste the following line into your shell:
|
||||||
|
|
||||||
```
|
```
|
||||||
docker run -d --name parse-postgres -p 5432:5432 -e POSTGRES_PASSWORD=password --rm postgis/postgis:11-3.0-alpine && sleep 20 && docker exec -it parse-postgres psql -U postgres -c 'CREATE DATABASE parse_server_postgres_adapter_test_database;' && docker exec -it parse-postgres psql -U postgres -c 'CREATE EXTENSION postgis;' -d parse_server_postgres_adapter_test_database && docker exec -it parse-postgres psql -U postgres -c 'CREATE EXTENSION postgis_topology;' -d parse_server_postgres_adapter_test_database
|
docker run -d --name parse-postgres -p 5432:5432 -e POSTGRES_PASSWORD=password --rm postgis/postgis:11-3.0-alpine && sleep 20 && docker exec -it parse-postgres psql -U postgres -c 'CREATE DATABASE parse_server_postgres_adapter_test_database;' && docker exec -it parse-postgres psql -U postgres -c 'CREATE EXTENSION pgcrypto; CREATE EXTENSION postgis;' -d parse_server_postgres_adapter_test_database && docker exec -it parse-postgres psql -U postgres -c 'CREATE EXTENSION postgis_topology;' -d parse_server_postgres_adapter_test_database
|
||||||
```
|
```
|
||||||
To stop the Postgres instance:
|
To stop the Postgres instance:
|
||||||
|
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ echo "[SCRIPT] Before Script :: Setup Parse DB for Postgres"
|
|||||||
PGPASSWORD=postgres psql -v ON_ERROR_STOP=1 -h localhost -U postgres <<-EOSQL
|
PGPASSWORD=postgres psql -v ON_ERROR_STOP=1 -h localhost -U postgres <<-EOSQL
|
||||||
CREATE DATABASE parse_server_postgres_adapter_test_database;
|
CREATE DATABASE parse_server_postgres_adapter_test_database;
|
||||||
\c parse_server_postgres_adapter_test_database;
|
\c parse_server_postgres_adapter_test_database;
|
||||||
|
CREATE EXTENSION pgcrypto;
|
||||||
CREATE EXTENSION postgis;
|
CREATE EXTENSION postgis;
|
||||||
CREATE EXTENSION postgis_topology;
|
CREATE EXTENSION postgis_topology;
|
||||||
EOSQL
|
EOSQL
|
||||||
|
|||||||
@@ -165,7 +165,7 @@ describe_only_db('postgres')('PostgresStorageAdapter', () => {
|
|||||||
]);
|
]);
|
||||||
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
||||||
await client.none(
|
await client.none(
|
||||||
'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
|
'INSERT INTO $1:name ($2:name, $3:name) SELECT gen_random_uuid(), gen_random_uuid() FROM generate_series(1,5000)',
|
||||||
[tableName, 'objectId', 'username']
|
[tableName, 'objectId', 'username']
|
||||||
);
|
);
|
||||||
const caseInsensitiveData = 'bugs';
|
const caseInsensitiveData = 'bugs';
|
||||||
@@ -245,7 +245,7 @@ describe_only_db('postgres')('PostgresStorageAdapter', () => {
|
|||||||
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
||||||
const client = adapter._client;
|
const client = adapter._client;
|
||||||
await client.none(
|
await client.none(
|
||||||
'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
|
'INSERT INTO $1:name ($2:name, $3:name) SELECT gen_random_uuid(), gen_random_uuid() FROM generate_series(1,5000)',
|
||||||
[tableName, 'objectId', 'username']
|
[tableName, 'objectId', 'username']
|
||||||
);
|
);
|
||||||
const caseInsensitiveData = 'bugs';
|
const caseInsensitiveData = 'bugs';
|
||||||
@@ -303,7 +303,7 @@ describe_only_db('postgres')('PostgresStorageAdapter', () => {
|
|||||||
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
||||||
const client = adapter._client;
|
const client = adapter._client;
|
||||||
await client.none(
|
await client.none(
|
||||||
'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
|
'INSERT INTO $1:name ($2:name, $3:name) SELECT gen_random_uuid(), gen_random_uuid() FROM generate_series(1,5000)',
|
||||||
[tableName, 'objectId', 'username']
|
[tableName, 'objectId', 'username']
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -345,11 +345,11 @@ describe_only_db('postgres')('PostgresStorageAdapter', () => {
|
|||||||
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
//Postgres won't take advantage of the index until it has a lot of records because sequential is faster for small db's
|
||||||
const client = adapter._client;
|
const client = adapter._client;
|
||||||
await client.none(
|
await client.none(
|
||||||
'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
|
'INSERT INTO $1:name ($2:name, $3:name) SELECT gen_random_uuid(), gen_random_uuid() FROM generate_series(1,5000)',
|
||||||
[firstTableName, 'objectId', uniqueField]
|
[firstTableName, 'objectId', uniqueField]
|
||||||
);
|
);
|
||||||
await client.none(
|
await client.none(
|
||||||
'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
|
'INSERT INTO $1:name ($2:name, $3:name) SELECT gen_random_uuid(), gen_random_uuid() FROM generate_series(1,5000)',
|
||||||
[secondTableName, 'objectId', uniqueField]
|
[secondTableName, 'objectId', uniqueField]
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user