Skip to content
Snippets Groups Projects
Commit 3ed28906 authored by Etienne LOUPIAS's avatar Etienne LOUPIAS
Browse files

feat(ci): add openshift

parent 371b3d89
Branches
Tags
2 merge requests!420V3.2.0,!397feat(ci): add openshift
# Pipeline stage order. The diff render duplicated "quality"; GitLab
# requires stage names to be unique, so it is listed exactly once.
stages:
  - test
  - quality
  - build
  - manual-db-copy
  - deploy
default:
  services:
    # Single docker-in-docker service; docker:25-dind supersedes the former
    # 18.09-dind entry (both appeared in the diff render — only one is kept).
    - name: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/docker:25-dind
      alias: docker
  before_script:
    # Image tag follows the branch/tag name of the pipeline ref.
    - export IMAGE_TAG=$CI_COMMIT_REF_NAME
    - echo $IMAGE_TAG

variables:
  # Prefix passed to docker builds so base images go through the
  # GitLab dependency proxy.
  DEPENDENCY_PROXY: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/
# Builds and pushes the application image for long-lived branches.
build:
  stage: build
  only:
    - dev
    - rec
    - master
  needs: []
  variables:
    DOCKER_TLS_CERTDIR: ''
    DOCKER_HOST: tcp://docker:2375/
    DOCKER_DRIVER: overlay2
  # docker:25 supersedes the former docker:18.09 (duplicate image key in the
  # diff render — only the new one is kept).
  image: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/docker:25
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    # Proxy login so --build-arg DEPENDENCY_PROXY pulls work inside the build.
    - docker login ${CI_DEPENDENCY_PROXY_SERVER} -u ${CI_DEPENDENCY_PROXY_USER} -p ${CI_DEPENDENCY_PROXY_PASSWORD}
    - docker build --pull -t "$CI_REGISTRY_IMAGE:$IMAGE_TAG" --build-arg DEPENDENCY_PROXY="$DEPENDENCY_PROXY" .
    - docker push "$CI_REGISTRY_IMAGE:$IMAGE_TAG"
# Build-only check for merge requests (image is not pushed).
build_branch:
  stage: build
  only:
    - merge_requests
  needs: []
  variables:
    DOCKER_TLS_CERTDIR: ''
    DOCKER_HOST: tcp://docker:2375/
    DOCKER_DRIVER: overlay2
  # Aligned with the other build jobs on docker:25 (was still 18.09, which no
  # longer matches the docker:25-dind service declared in `default`).
  image: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/docker:25
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - docker build --pull -t "$CI_REGISTRY_IMAGE/feat:$CI_COMMIT_REF_SLUG" --build-arg conf=prod .
    # - docker push "$CI_REGISTRY_IMAGE/feat:$CI_COMMIT_REF_SLUG"
# Builds and pushes the image for release tags (renamed from build-release;
# the diff render showed both names and both old/new script variants).
# IMAGE_TAG is the tag name on tag pipelines (set in default.before_script).
build-tag:
  stage: build
  only:
    - tags
  # NOTE(review): a diff hunk hid one or two lines here (likely `needs: []`);
  # confirm against the repository.
  variables:
    DOCKER_TLS_CERTDIR: ''
    DOCKER_HOST: tcp://docker:2375/
    DOCKER_DRIVER: overlay2
  image: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/docker:25
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - docker login ${CI_DEPENDENCY_PROXY_SERVER} -u ${CI_DEPENDENCY_PROXY_USER} -p ${CI_DEPENDENCY_PROXY_PASSWORD}
    - docker build --pull -t "$CI_REGISTRY_IMAGE:$IMAGE_TAG" --build-arg DEPENDENCY_PROXY="$DEPENDENCY_PROXY" .
    - docker push "$CI_REGISTRY_IMAGE:$IMAGE_TAG"
# Legacy (non-OpenShift) deployment of the dev branch via a docker host.
deploy_dev:
  stage: deploy
  # NOTE(review): the `tags:` key was hidden behind a diff hunk; `- deploy`
  # was visible beneath it — confirm against the repository.
  tags:
    - deploy
  only:
    - dev
  # The diff render showed `needs` twice (old block form + new flow form);
  # only the new form is kept.
  needs: ['test', 'build']
  script:
    - cd /home/mps/ram
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
# Unit/integration tests against a disposable Elasticsearch service.
test:
  stage: test
  only:
    - dev
    - merge_requests
  needs: []
  image: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/node:18.17.0
  services:
    - name: ${CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX}/elasticsearch:7.16.2
      alias: elasticsearch
      variables:
        # Cap the JVM heap so the service fits the runner's memory.
        ES_JAVA_OPTS: "-Xms256m -Xmx256m"
      # The diff render showed `command` twice (old one-liner + new
      # multi-line form); only one is kept.
      command:
        [
          'bin/elasticsearch',
          '-Expack.security.enabled=false',
          '-Ediscovery.type=single-node',
        ]
  before_script:
    - export GHOST_HOST_AND_PORT=http://localhost:2368
    - export GHOST_ADMIN_API_KEY=60142bc9e33940000156bccc:6217742e2671e322612e89cac9bab61fcd01822709fe5d8f5e6a5b3e54d5e6bb
......@@ -129,29 +123,92 @@ sonarqube:
-Dsonar.exclusions=test/**,scripts/**,src/**/*.spec.ts*,src/migrations/scripts/**
-Dsonar.qualitygate.wait=true
# Hidden template: restores the latest prod backup into the target
# environment's MongoDB (see scripts/db/oc-mongodb-copy-prod.sh).
# The removed `sonarqube-mr` job's lines (name, stage, sonar image, SONAR_*
# variables, cache) were interleaved here by the diff render and are dropped.
.db-copy:
  script:
    - if [ "$CI_ENVIRONMENT_NAME" == "dev" ]; then export KUBECONFIG=$KUBECONFIG_DEV; fi
    - if [ "$CI_ENVIRONMENT_NAME" == "rec" ]; then export KUBECONFIG=$KUBECONFIG_REC; fi
    - ./scripts/db/oc-mongodb-copy-prod.sh
  tags:
    # Runner tag is namespace-specific; NAMESPACE_ENV comes from each job.
    - ns-res-$NAMESPACE_ENV-syn

db-copy-10-dev:
  stage: manual-db-copy
  # NOTE(review): `merge_requests` may belong to the removed sonarqube-mr
  # job rather than this one — confirm against the repository.
  only:
    - merge_requests
    - tags
  needs: []
  extends: .db-copy
  environment:
    name: dev
  variables:
    NAMESPACE_ENV: "d01"
  when: manual

db-copy-20-rec:
  stage: manual-db-copy
  only:
    - tags
  needs: []
  extends: .db-copy
  environment:
    name: rec
  variables:
    NAMESPACE_ENV: "r01"
  when: manual
# Hidden template: renders k8s/*.yml with envsubst and applies them to the
# target OpenShift project. The orphaned sonar-scanner block that the diff
# render left inside this script (it belonged to the removed sonarqube-mr
# job) is dropped.
.deploy:
  script:
    - if [ "$CI_ENVIRONMENT_NAME" == "dev" ]; then export KUBECONFIG=$KUBECONFIG_DEV; fi
    - if [ "$CI_ENVIRONMENT_NAME" == "rec" ]; then export KUBECONFIG=$KUBECONFIG_REC; fi
    - if [ "$CI_ENVIRONMENT_NAME" == "pro" ]; then export KUBECONFIG=$KUBECONFIG_PRO; fi
    # Non-prod hosts get "-<env>.apps" suffixes; prod uses bare hostnames.
    - export URL_ENV_SUFFIX="-$CI_ENVIRONMENT_NAME"
    - export URL_APPS_SUFFIX=".apps"
    - if [ "$CI_ENVIRONMENT_NAME" == "pro" ]; then export URL_ENV_SUFFIX=""; URL_APPS_SUFFIX=""; fi
    - echo ${URL_ENV_SUFFIX}${URL_APPS_SUFFIX}
    - mkdir -p k8s/env
    # Use envsubst to substitute env variables in all deployment/*.yml files
    - cd k8s ; for f in *.yml; do envsubst < $f > env/$f ; done ; cd ..
    - ls k8s/env/
    - oc whoami
    - oc apply -f k8s/env/
    # Restart the server pods so they pick up the freshly pushed image.
    - oc delete pod -l app=res-server
  tags:
    - ns-res-$NAMESPACE_ENV-syn
# OpenShift deployments per environment, all built on the .deploy template.
deploy-10-dev:
  stage: deploy
  only:
    - dev
  needs: ['test', 'build']
  inherit:
    # Keep only default.before_script (the IMAGE_TAG export); the default
    # dind service is not needed for `oc apply`.
    default: [before_script]
  extends: .deploy
  environment:
    name: dev
  variables:
    NAMESPACE_ENV: "d01"

deploy-20-rec:
  stage: deploy
  only:
    - tags
  inherit:
    default: [before_script]
  extends: .deploy
  environment:
    name: rec
  variables:
    NAMESPACE_ENV: "r01"
  # rec and pro deployments are manually triggered from tag pipelines.
  when: manual

deploy-30-pro:
  stage: deploy
  only:
    - tags
  inherit:
    default: [before_script]
  extends: .deploy
  environment:
    name: pro
  variables:
    NAMESPACE_ENV: "p01"
  when: manual
# ARG must be declared before FROM to be usable in the base-image reference.
# The empty default keeps plain docker.io pulls working outside CI; in CI,
# DEPENDENCY_PROXY routes the pull through the GitLab dependency proxy.
# (The diff render duplicated the old bare FROM line; only the new pair is kept.)
ARG DEPENDENCY_PROXY=
FROM ${DEPENDENCY_PROXY}node:18-bullseye

# Create app directory
WORKDIR /app
......
......@@ -11,6 +11,7 @@ data:
MONGO_NON_ROOT_USERNAME: '$MONGO_NON_ROOT_USERNAME_K8S'
ME_CONFIG_BASICAUTH_USERNAME: '$ME_CONFIG_BASICAUTH_USERNAME_K8S'
MAIL_URL: '$MAIL_URL_K8S'
MAIL_FROM: '$MAIL_FROM_K8S'
MAIL_SOSTECH: '$MAIL_SOSTECH_K8S'
MAIL_CONTACT: '$MAIL_CONTACT_K8S'
GHOST_HOST_AND_PORT: '$GHOST_HOST_AND_PORT_K8S'
......
# Secret template for the Resin stack. The $..._K8S placeholders are filled
# by envsubst in the CI .deploy script before `oc apply` — this file must not
# be applied as-is.
kind: Secret
apiVersion: v1
metadata:
  name: res-secret
stringData:
  MONGODB_ROOT_PASSWORD: '$MONGODB_ROOT_PASSWORD_K8S'
  # MONGODB_PASSWORD and MONGO_NON_ROOT_PASSWORD are filled from the same
  # source variable (two consumer naming schemes for one credential).
  MONGODB_PASSWORD: '$MONGO_NON_ROOT_PASSWORD_K8S'
  MONGO_NON_ROOT_PASSWORD: '$MONGO_NON_ROOT_PASSWORD_K8S'
  ME_CONFIG_MONGODB_ADMINPASSWORD: '$MONGODB_ROOT_PASSWORD_K8S'
  ME_CONFIG_BASICAUTH_PASSWORD: '$ME_CONFIG_BASICAUTH_PASSWORD_K8S'
  MAIL_TOKEN: '$MAIL_TOKEN_K8S'
  GHOST_ADMIN_API_KEY: '$GHOST_ADMIN_API_KEY_K8S'
  GHOST_CONTENT_API_KEY: '$GHOST_CONTENT_API_KEY_K8S'
  # Same ES credential exposed under both the app's and Elastic's names.
  ELASTICSEARCH_PASSWORD: '$ELASTICSEARCH_PASSWORD_K8S'
  ELASTIC_PASSWORD: '$ELASTICSEARCH_PASSWORD_K8S'
  MC_API_KEY: '$MC_API_KEY_K8S'
  JWT_SECRET: '$JWT_SECRET_K8S'
  SALT: '$SALT_K8S'
  MYSQL_ROOT_PASSWORD: '$GHOST_DB_PASSWORD_K8S'
  GHOST_DATABASE_PASSWORD: '$GHOST_DB_PASSWORD_K8S'
  GHOST_PASSWORD: '$GHOST_PASSWORD_K8S'
# MongoDB for the Resin app. $NAMESPACE_ENV is substituted by envsubst at
# deploy time (see the CI .deploy template).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-db-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-db
  template:
    metadata:
      labels:
        app: res-db
    spec:
      volumes:
        - name: res-pvc-data
          persistentVolumeClaim:
            claimName: pvc-02-ns-res-$NAMESPACE_ENV-syn-claim
      containers:
        - name: res-db
          envFrom:
            - configMapRef:
                name: res-configmap
            - secretRef:
                name: res-secret
          image: docker.io/bitnami/mongodb:4.2.3
          volumeMounts:
            - mountPath: /bitnami/mongodb
              name: res-pvc-data
          ports:
            - containerPort: 27017
              protocol: TCP
          # TCP-only probes: they check the port is open, not that mongod
          # answers queries.
          readinessProbe:
            tcpSocket:
              port: 27017
          livenessProbe:
            tcpSocket:
              port: 27017
            initialDelaySeconds: 120
          resources:
            requests:
              memory: 50Mi
              cpu: 400m
            limits:
              memory: 400Mi
              cpu: 400m
---
kind: Service
apiVersion: v1
metadata:
  name: res-db-service
spec:
  selector:
    app: res-db
  ports:
    - protocol: TCP
      port: 27017
      targetPort: 27017
# mongo-express admin UI for the MongoDB instance.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-dbadmin-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-dbadmin
  template:
    metadata:
      labels:
        app: res-dbadmin
    spec:
      containers:
        - name: res-dbadmin
          image: docker.io/mongo-express:0.54.0
          envFrom:
            - configMapRef:
                name: res-configmap
            - secretRef:
                name: res-secret
          env:
            - name: ME_CONFIG_MONGODB_ADMINUSERNAME
              value: "root"
            - name: ME_CONFIG_MONGODB_SERVER
              value: "res-db-service"
            - name: ME_CONFIG_MONGODB_URL
              value: "mongodb://res-db-service:27017"
          ports:
            - containerPort: 8081
              protocol: TCP
          readinessProbe:
            tcpSocket:
              port: 8081
          livenessProbe:
            tcpSocket:
              port: 8081
          resources:
            requests:
              memory: 50Mi
              cpu: 50m
            limits:
              memory: 400Mi
              cpu: 100m
      # NOTE(review): the scrape lost indentation — placed at pod level to
      # match res-maintenance; confirm original level (pod vs container).
      securityContext:
        seLinuxOptions:
          type: spc_t
---
kind: Service
apiVersion: v1
metadata:
  name: res-dbadmin-service
spec:
  selector:
    app: res-dbadmin
  ports:
    - protocol: TCP
      port: 8081
      targetPort: 8081
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: res-dbadmin-route
spec:
  # .apps url and certificate set for this route for all environments including prod
  host: resin-db$URL_ENV_SUFFIX.apps.grandlyon.com
  to:
    kind: Service
    name: res-dbadmin-service
  port:
    targetPort: 8081
  wildcardPolicy: None
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Redirect
    certificate: $HTTPS_CERTIFICATE_APPS
# MySQL database backing the Ghost CMS.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-ghost-db-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-ghost-db
  template:
    metadata:
      labels:
        app: res-ghost-db
    spec:
      volumes:
        - name: res-pvc-data-ghost
          persistentVolumeClaim:
            claimName: pvc-03-ns-res-$NAMESPACE_ENV-syn-claim
      containers:
        - name: res-ghost-db
          envFrom:
            - configMapRef:
                name: res-configmap
            - secretRef:
                name: res-secret
          image: docker.io/mysql:8.4.0
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: res-pvc-data-ghost
          ports:
            - containerPort: 3306
              protocol: TCP
          readinessProbe:
            tcpSocket:
              port: 3306
          livenessProbe:
            tcpSocket:
              port: 3306
            initialDelaySeconds: 120
          resources:
            requests:
              memory: 400Mi
              cpu: 400m
            limits:
              memory: 800Mi
              cpu: 400m
---
kind: Service
apiVersion: v1
metadata:
  name: res-ghost-db-service
spec:
  selector:
    app: res-ghost-db
  ports:
    - protocol: TCP
      port: 3306
      targetPort: 3306
# phpMyAdmin for the Ghost MySQL database.
# replicas: 0 — kept scaled down; presumably scaled up on demand only (verify).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-ghost-dbadmin-deployment
spec:
  replicas: 0
  selector:
    matchLabels:
      app: res-ghost-dbadmin
  template:
    metadata:
      labels:
        app: res-ghost-dbadmin
    spec:
      containers:
        - name: res-ghost-dbadmin
          image: docker.io/nazarpc/phpmyadmin:5.0.2_build-1
          env:
            - name: MYSQL_HOST
              value: res-ghost-db-service
            - name: HTTP_PORT
              value: "5000"
            - name: HTTPS_PORT
              value: "5001"
          ports:
            - containerPort: 5000
              protocol: TCP
          readinessProbe:
            httpGet:
              path: /
              port: 5000
          livenessProbe:
            httpGet:
              path: /
              port: 5000
          resources:
            requests:
              memory: 50Mi
              cpu: 50m
            limits:
              memory: 200Mi
              cpu: 200m
---
kind: Service
apiVersion: v1
metadata:
  name: res-ghost-dbadmin-service
spec:
  selector:
    app: res-ghost-dbadmin
  ports:
    - protocol: TCP
      port: 5000
      targetPort: 5000
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: res-ghost-dbadmin-route
spec:
  # .apps url and certificate set for this route for all environments including prod
  host: resin-cms-db$URL_ENV_SUFFIX.apps.grandlyon.com
  to:
    kind: Service
    name: res-ghost-dbadmin-service
  port:
    targetPort: 5000
  wildcardPolicy: None
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Redirect
    certificate: $HTTPS_CERTIFICATE_APPS
# Ghost CMS. GHOST_HOST/route use URL_ENV_SUFFIX set by the CI .deploy script.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-ghost-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-ghost
  template:
    metadata:
      labels:
        app: res-ghost
    spec:
      volumes:
        - name: res-pvc-files
          persistentVolumeClaim:
            claimName: pvc-01-ns-res-$NAMESPACE_ENV-syn-claim
      containers:
        - name: res-ghost
          image: docker.io/bitnami/ghost:5.82.6
          volumeMounts:
            # Shares the files PVC with other workloads via a subPath.
            - mountPath: /bitnami/ghost
              name: res-pvc-files
              subPath: "ghost"
          env:
            - name: GHOST_HOST
              value: resin-cms${URL_ENV_SUFFIX}.apps.grandlyon.com
            - name: GHOST_ENABLE_HTTPS
              value: "yes"
          envFrom:
            - configMapRef:
                name: res-configmap
            - secretRef:
                name: res-secret
          ports:
            - containerPort: 2368
              protocol: TCP
          readinessProbe:
            tcpSocket:
              port: 2368
          livenessProbe:
            tcpSocket:
              port: 2368
            # Ghost is slow to boot; give it 3 minutes before liveness checks.
            initialDelaySeconds: 180
          resources:
            requests:
              memory: 50Mi
              cpu: 600m
            limits:
              memory: 400Mi
              cpu: 600m
      imagePullSecrets:
        - name: forge-secret
---
kind: Service
apiVersion: v1
metadata:
  name: res-ghost-service
spec:
  selector:
    app: res-ghost
  ports:
    - protocol: TCP
      port: 2368
      targetPort: 2368
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: res-ghost-route
spec:
  # .apps url and certificate set for this route for all environments including prod
  host: resin-cms${URL_ENV_SUFFIX}.apps.grandlyon.com
  to:
    kind: Service
    name: res-ghost-service
  port:
    targetPort: 2368
  wildcardPolicy: None
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Redirect
    certificate: $HTTPS_CERTIFICATE_APPS
# Single-node Elasticsearch with security enabled (credentials come from
# res-secret via envFrom).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-es-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-es
  template:
    metadata:
      labels:
        app: res-es
    spec:
      volumes:
        - name: res-pvc-es
          persistentVolumeClaim:
            claimName: pvc-04-ns-res-$NAMESPACE_ENV-syn-claim
      containers:
        - name: res-es
          image: docker.io/elasticsearch:8.12.2
          volumeMounts:
            - mountPath: /usr/share/elasticsearch/data
              name: res-pvc-es
          envFrom:
            - configMapRef:
                name: res-configmap
            - secretRef:
                name: res-secret
          env:
            - name: discovery.type
              value: "single-node"
            - name: xpack.security.enabled
              value: "true"
          ports:
            - containerPort: 9200
              protocol: TCP
          readinessProbe:
            tcpSocket:
              port: 9200
          livenessProbe:
            tcpSocket:
              port: 9200
            initialDelaySeconds: 120
          resources:
            requests:
              memory: 50Mi
              cpu: 600m
            limits:
              memory: 800Mi
              cpu: 600m
      imagePullSecrets:
        - name: forge-secret
---
kind: Service
apiVersion: v1
metadata:
  name: res-es-service
spec:
  selector:
    app: res-es
  ports:
    - protocol: TCP
      port: 9200
      targetPort: 9200
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: res-es-route
spec:
  # .apps url and certificate set for this route for all environments including prod
  host: resin-es${URL_ENV_SUFFIX}.apps.grandlyon.com
  to:
    kind: Service
    name: res-es-service
  port:
    targetPort: 9200
  wildcardPolicy: None
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Redirect
    certificate: $HTTPS_CERTIFICATE_APPS
# Application server; image tag ($IMAGE_TAG) is substituted at deploy time.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-server-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-server
  template:
    metadata:
      labels:
        app: res-server
      annotations:
        # Velero need a working pod to ensure backup of PVC
        backup.velero.io/backup-volumes: res-pvc-files
    spec:
      volumes:
        - name: res-pvc-files
          persistentVolumeClaim:
            claimName: pvc-01-ns-res-$NAMESPACE_ENV-syn-claim
      containers:
        - name: res-server
          image: registry.forge.grandlyon.com/web-et-numerique/factory/pamn_plateforme-des-acteurs-de-la-mediation-numerique/pamn_server:$IMAGE_TAG
          # Always pull: the same tag (branch name) is re-pushed on each build.
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /.npm
              name: res-pvc-files
              subPath: ".npm"
            # Migration bookkeeping persisted across pod restarts.
            - mountPath: /app/.migrate
              name: res-pvc-files
              subPath: ".migrate"
          env:
            - name: GHOST_HOST_AND_PORT
              value: https://resin-cms${URL_ENV_SUFFIX}.apps.grandlyon.com
          envFrom:
            - configMapRef:
                name: res-configmap
            - secretRef:
                name: res-secret
          ports:
            - containerPort: 3000
              protocol: TCP
          readinessProbe:
            httpGet:
              path: /api/healthcheck
              port: 3000
          livenessProbe:
            httpGet:
              path: /api/healthcheck
              port: 3000
            initialDelaySeconds: 120
          resources:
            requests:
              memory: 100Mi
              cpu: 800m
            limits:
              memory: 400Mi
              cpu: 800m
      imagePullSecrets:
        - name: forge-secret
---
kind: Service
apiVersion: v1
metadata:
  name: res-server-service
spec:
  selector:
    app: res-server
  ports:
    - protocol: TCP
      port: 3000
      targetPort: 3000
# Maintenance-page nginx; also the utility pod that mounts the shared files
# PVC (used e.g. by oc-mongodb-copy-prod.sh for the .migrate file).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: res-maintenance-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: res-maintenance
  template:
    metadata:
      labels:
        app: res-maintenance
    spec:
      volumes:
        - name: res-pvc-files
          persistentVolumeClaim:
            claimName: pvc-01-ns-res-$NAMESPACE_ENV-syn-claim
        - name: res-pvc-data
          persistentVolumeClaim:
            claimName: pvc-02-ns-res-$NAMESPACE_ENV-syn-claim
        - name: res-pvc-data-ghost
          persistentVolumeClaim:
            claimName: pvc-03-ns-res-$NAMESPACE_ENV-syn-claim
        - name: res-pvc-es
          persistentVolumeClaim:
            claimName: pvc-04-ns-res-$NAMESPACE_ENV-syn-claim
      securityContext:
        seLinuxOptions:
          type: spc_t
        fsGroupChangePolicy: OnRootMismatch
      containers:
        - name: res-maintenance
          image: docker.io/nginxinc/nginx-unprivileged:1.25
          volumeMounts:
            - mountPath: /pvc-files
              name: res-pvc-files
            # /!\ Only for debugging or panic purpose ! These block-typed PVC can not be mounted of more than one POD.
            #- mountPath: /pvc-data
            #  name: res-pvc-data
            #- mountPath: /pvc-data-ghost
            #  name: res-pvc-data-ghost
            #- mountPath: /pvc-es
            #  name: res-pvc-es
            - mountPath: /etc/nginx/conf.d
              name: res-pvc-files
              subPath: "maintenance-page/nginx"
            - mountPath: /usr/share/nginx/html
              name: res-pvc-files
              subPath: "maintenance-page/html"
          # `cat /dev/null` always succeeds: probes only assert the container
          # is able to exec, not that nginx serves traffic.
          readinessProbe:
            exec:
              command:
                - cat
                - /dev/null
          livenessProbe:
            exec:
              command:
                - cat
                - /dev/null
          resources:
            requests:
              cpu: 20m
              memory: 50Mi
            limits:
              cpu: 100m
              memory: 100Mi
---
kind: Service
apiVersion: v1
metadata:
  name: res-maintenance-service
spec:
  selector:
    app: res-maintenance
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: res-maintenance-route
spec:
  # .apps url and certificate set for this route for all environments including prod
  host: resin-maintenance$URL_ENV_SUFFIX.apps.grandlyon.com
  to:
    kind: Service
    name: res-maintenance-service
  port:
    targetPort: 8080
  wildcardPolicy: None
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Redirect
    certificate: $HTTPS_CERTIFICATE_APPS
// Anonymization script run after restoring a prod dump (see
// oc-mongodb-copy-prod.sh): rewrites user emails/passwords and purges
// transient collections.

// Build a throwaway address "<name>.<surname>@nowhere69.com" for every user
// and rewrite the whole collection in place via $out.
db.users.aggregate([
  {
    $addFields: {
      emailTemp: { $concat: [{ $toLower: '$name' }, '.', { $toLower: '$surname' }, '@nowhere69.com'] },
    },
  },
  { $out: 'users' },
]);

// For non-admin users (role != 1), replace the real email with the fake one
// and reset the password to a fixed bcrypt hash. $literal keeps the
// '$2a$12$...' value from being interpreted as a field path.
db.users.updateMany({ role: { $ne: 1 } }, [
  {
    $set: {
      email: '$emailTemp',
      password: { $literal: '$2a$12$vLQjJ9zAWyUwiXLeQDa6w.XzrlgPBhw.2GWrjog/yuEjIaZnQwmZu' },
    },
  },
]);

// Drop the temporary field everywhere and clear transient collections.
db.users.updateMany({}, [{ $unset: 'emailTemp' }]);
db.tempusers.deleteMany({});
db.newslettersubscriptions.deleteMany({});
# Restores the latest production MongoDB backup into the current (non-prod)
# OpenShift project's database, anonymizes user data, and syncs the .migrate
# marker file. Requires: oc logged into the target project, BACKUP_URL set.
set -e # Exit shell when an error is encountered
if [ "$1" = "-h" ]; then
  echo "Usage: `basename $0` [DB_NAME (default=ram)]"
  exit 0
fi

# Config
DB_NAME=${1:-"ram"}
echo "DB_NAME=$DB_NAME"

# Check project
CURRENT_PROJECT=$(oc project --short)
echo "CURRENT_PROJECT=$CURRENT_PROJECT"
# Guard rails: refuse to run outside a Resin namespace or inside production.
# NOTE(review): both guards exit 0 (success) — presumably so a CI job does
# not fail; confirm a non-zero exit is not expected here.
if [ "${CURRENT_PROJECT:0:7}" != "ns-res-" ]; then
  echo "Forbidden: This is not a Resin project"
  exit 0
fi
if [ "${CURRENT_PROJECT:0:8}" == "ns-res-p" ]; then
  echo "Forbidden: This is the production project"
  exit 0
fi

# Restore of the source backup
curl -kLSs ${BACKUP_URL}/latest.tar.gz -o latest.tar.gz
curl -kLSs ${BACKUP_URL}/.migrate -o .migrate
tar -xvf ./latest.tar.gz
oc exec deploy/res-db-deployment -- bash -c 'rm -rf /tmp/dump'
oc exec deploy/res-db-deployment -- bash -c 'mkdir /tmp/dump'
DB_POD_NAME=$(oc get pods -l app=res-db -o jsonpath='{.items[0].metadata.name}')
echo "DB_POD_NAME=$DB_POD_NAME"
# Copy the extracted dump into the pod, then drop and restore the target db.
oc cp ./mongo__*/ram/ $DB_POD_NAME:/tmp/dump/$DB_NAME
oc exec deploy/res-db-deployment -- bash -c "mongo --authenticationDatabase admin --username root --password \$MONGODB_ROOT_PASSWORD $DB_NAME --eval 'db.dropDatabase()'"
oc exec deploy/res-db-deployment -- bash -c "mongorestore --authenticationDatabase admin --username root --password \$MONGODB_ROOT_PASSWORD -d $DB_NAME /tmp/dump/$DB_NAME/"
echo "***** Latest backup restored in $DB_POD_NAME in db $DB_NAME"

# Cleanup user email and password change (not for admin users)
oc cp $(dirname "$(realpath "$0")")/mongo_clean_users.js "${DB_POD_NAME}:/tmp/dump/"
oc exec deploy/res-db-deployment -- bash -c "mongo --authenticationDatabase admin --username root --password \$MONGODB_ROOT_PASSWORD $DB_NAME < /tmp/dump/mongo_clean_users.js"

# .migrate file copy
if [ "$DB_NAME" = "ram" ]; then
  # Keep the previous .migrate aside in the pod, then install the one
  # downloaded alongside the backup.
  oc exec deploy/res-maintenance-deployment -- bash -c 'mv -f /pvc-files/.migrate /tmp/.migrate'
  oc cp ./.migrate $(oc get pods -l app=res-maintenance -o jsonpath='{.items[0].metadata.name}'):/pvc-files/.migrate
fi
oc exec deploy/res-db-deployment -- bash -c 'rm -rf /tmp/dump/'

echo ""
echo "*********************************************************** Summary ***********************************************************"
echo "Latest backup restored in $DB_POD_NAME in db $DB_NAME (and data cleaned up)"
if [ "$DB_NAME" = "ram" ]; then echo "The local .migrate file has been copied"; fi
if [ "$DB_NAME" = "ram" ]; then echo "Don't forget to reset elasticsearch indexes!!!"; fi
echo "*******************************************************************************************************************************"
exit 0
......@@ -10,7 +10,7 @@ export const md5 = (data: string): string => crypto.createHash('md5').update(dat
export function rewriteGhostImgUrl(configService: ConfigurationService, itemData: Page | Post): Page | Post {
// Handle image display. Rewrite image URL to fit ghost infra issue.
if (!configService.isLocalConf()) {
if (!configService.isLocalConf() && process.env.IS_OPENSHIFT !== 'true') {
if (itemData.feature_image) {
itemData.feature_image = `https://${configService.config.host}/blog/content${
itemData.feature_image.split('/content')[1]
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment