Begin moving to postgres spilo + adding pgvector (#8309)
We will remove the `twenty-postgres` image that was used for local development and only use `twenty-postgres-spilo` (which we use in prod), bringing the development environment closer to prod and avoiding having to maintain 2 images. Instead of provisioning the super user after the db initialization, we directly rely on the superuser provided by Spilo for simplicity. We also introduce a change that tries to create the right database (`default` or `test`) based on the context. How to test: ``` docker build -t twentycrm/twenty-postgres-spilo:latest -f ./packages/twenty-docker/twenty-postgres-spilo/Dockerfile . docker images --no-trunc | grep twenty-postgres-spilo postgres-on-docker: docker run \ --name twenty_pg \ -e PGUSER_SUPERUSER=twenty \ -e PGPASSWORD_SUPERUSER=twenty \ -e ALLOW_NOSSL=true \ -v twenty_db_data:/home/postgres/pgdata \ -p 5432:5432 \ REPLACE_WITH_IMAGE_ID ```
This commit is contained in:
@ -1,5 +1,5 @@
|
||||
# Use this for local setup
|
||||
PG_DATABASE_URL=postgres://twenty:twenty@localhost:5432/default
|
||||
PG_DATABASE_URL=postgres://postgres:twenty@localhost:5432/default
|
||||
REDIS_URL=redis://localhost:6379
|
||||
|
||||
FRONT_BASE_URL=http://localhost:3001
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
PG_DATABASE_URL=postgres://twenty:twenty@localhost:5432/test
|
||||
PG_DATABASE_URL=postgres://postgres:twenty@localhost:5432/test
|
||||
REDIS_URL=redis://localhost:6379
|
||||
|
||||
DEBUG_MODE=true
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
#!/bin/sh
|
||||
export PG_DATABASE_URL=postgres://twenty:twenty@$PG_DATABASE_HOST:$PG_DATABASE_PORT/default
|
||||
export PG_DATABASE_URL=postgres://postgres:twenty@$PG_DATABASE_HOST:$PG_DATABASE_PORT/default
|
||||
yarn database:init:prod
|
||||
node dist/src/main
|
||||
|
||||
@ -1,3 +1,3 @@
|
||||
#!/bin/sh
|
||||
export PG_DATABASE_URL=postgres://twenty:twenty@$PG_DATABASE_HOST:$PG_DATABASE_PORT/default
|
||||
export PG_DATABASE_URL=postgres://postgres:twenty@$PG_DATABASE_HOST:$PG_DATABASE_PORT/default
|
||||
node dist/src/queue-worker/queue-worker
|
||||
|
||||
@ -7,6 +7,11 @@ import { camelToSnakeCase, performQuery } from './utils';
|
||||
rawDataSource
|
||||
.initialize()
|
||||
.then(async () => {
|
||||
await performQuery(
|
||||
'CREATE EXTENSION IF NOT EXISTS "vector"',
|
||||
'create extension "vector (pgvector)"',
|
||||
);
|
||||
|
||||
await performQuery(
|
||||
'CREATE SCHEMA IF NOT EXISTS "public"',
|
||||
'create schema "public"',
|
||||
@ -53,7 +58,7 @@ rawDataSource
|
||||
for (const wrapper of supabaseWrappers) {
|
||||
await performQuery(
|
||||
`
|
||||
CREATE FOREIGN DATA WRAPPER "${wrapper.toLowerCase()}_fdw"
|
||||
CREATE FOREIGN DATA WRAPPER IF NOT EXISTS "${wrapper.toLowerCase()}_fdw"
|
||||
HANDLER "${camelToSnakeCase(wrapper)}_fdw_handler"
|
||||
VALIDATOR "${camelToSnakeCase(wrapper)}_fdw_validator";
|
||||
`,
|
||||
|
||||
@ -21,6 +21,14 @@ async function dropSchemasSequentially() {
|
||||
// Iterate over each schema and drop it
|
||||
// This is to avoid dropping all schemas at once, which would cause an out of shared memory error
|
||||
for (const schema of schemas) {
|
||||
if (
|
||||
schema.schema_name === 'metric_helpers' ||
|
||||
schema.schema_name === 'user_management' ||
|
||||
schema.schema_name === 'public'
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
await performQuery(
|
||||
`
|
||||
DROP SCHEMA IF EXISTS "${schema.schema_name}" CASCADE;
|
||||
|
||||
Reference in New Issue
Block a user