Skip to content
Snippets Groups Projects
Commit 49658a42 authored by Rémi PAILHAREY's avatar Rémi PAILHAREY :fork_knife_plate:
Browse files

feat: OpenShift deployment

parent 06c128d4
Branches
No related tags found
4 merge requests!73Deploy Openshift v2,!65MEP: removed Meilisearch,!52back-office SGE before canary release,!39Feat/us823 openshift deploy
Showing
with 396 additions and 137 deletions
.env .env
backoffice.db backoffice.db
meili_data/ meili_data/
\ No newline at end of file db_data/
\ No newline at end of file
...@@ -10,30 +10,7 @@ ...@@ -10,30 +10,7 @@
"request": "launch", "request": "launch",
"mode": "debug", "mode": "debug",
"program": "${workspaceFolder}/main.go", "program": "${workspaceFolder}/main.go",
"env": { "envFile": "${workspaceFolder}/.env",
"HOSTNAME": "localhost",
"ADMIN_ROLE": "ADMINS",
"DEBUG_MODE": "true",
"MOCK_OAUTH2": "true",
"HTTPS_PORT": "1443",
"IMAGE_FOLDER": "image-lib",
"REDIRECT_URL": "https://localhost:1443/OAuth2Callback",
"CLIENT_ID": "foo",
"CLIENT_SECRET": "bar",
"AUTH_URL": "http://localhost:8090/auth",
"TOKEN_URL": "http://localhost:8090/token",
"USERINFO_URL": "http://localhost:8090/admininfo",
"DATABASE_USER": "root",
"DATABASE_PASSWORD": "password",
"DATABASE_NAME": "backoffice",
"DATABASE_HOST": "127.0.0.1",
"LOGOUT_URL": "/",
"SGE_API_TOKEN": "sgeapitoken",
"MEILI_HOST": "http://localhost:7700",
"MEILI_MASTER_KEY": "masterkey"
},
"showLog": true "showLog": true
}, },
{ {
......
...@@ -42,7 +42,7 @@ RUN setcap cap_net_bind_service=+ep /app/backoffice-server ...@@ -42,7 +42,7 @@ RUN setcap cap_net_bind_service=+ep /app/backoffice-server
############################## ##############################
# STEP 2 build a small image # # STEP 2 build a small image #
############################## ##############################
FROM scratch FROM curlimages/curl:7.85.0
WORKDIR /app WORKDIR /app
...@@ -56,7 +56,7 @@ COPY --from=builder /etc/mime.types /etc/mime.types ...@@ -56,7 +56,7 @@ COPY --from=builder /etc/mime.types /etc/mime.types
# Copy static executable and application resources # Copy static executable and application resources
COPY --from=builder /app/backoffice-server /app/backoffice-server COPY --from=builder /app/backoffice-server /app/backoffice-server
COPY --from=builder /app/dev_certificates /app/dev_certificates COPY --from=builder /app/dev_certificates /app/dev_certificates
COPY --from=builder /app/configs /app/configs COPY --from=builder /app/mnt/configs /app/mnt/configs
# Use an unprivileged user. # Use an unprivileged user.
USER appuser:appuser USER appuser:appuser
......
...@@ -30,23 +30,15 @@ Once deployed, you can access to a Swagger documentation of the API on https://$ ...@@ -30,23 +30,15 @@ Once deployed, you can access to a Swagger documentation of the API on https://$
## Launch locally ## Launch locally
To launch it locally : Launch all necessary services :
- Run `docker-compose up -d` - Run `docker-compose up -d`
In local you can access phpmyadmin interface to manage the database : http://localhost:8008 In local you can access phpmyadmin interface to manage the database : http://localhost:8008
## Launch locally in standalone for development Launch the server by pressing F5 on vscode, you will see the logs on the debug console.
To launch the backend for development goal: All API routes are available on https://localhost:1443
- edit _main.go_ file, uncomment the last line "log.Fatal(http.ListenAndServe(":"+strconv.Itoa(httpsPort), rootMux.Router))" and comment the line above "log.Fatal(http.ListenAndServeTLS(":"+strconv.Itoa(httpsPort), "./dev_certificates/localhost.crt", "./dev_certificates/localhost.key", rootMux.Router))"
- This way you disable https so make sure your requests goes on http adresses
- in _vscode/launch.json_ set "REDIRECT_URL" to "http://localhost:1443/OAuth2Callback",
- also comment the port and host values
- if you comment the DATABASE_USER line, it will launches with a sqlite database instead of mysql
- launch the app by pressing F5 on vscode, you will see the logs on the debug console.
- this way you won't have the login every time you relaunch the backend for development
## Build image for local ## Build image for local
......
FROM alpine:3.14.0

# Cron daemon support, mysql client for dumps, findutils for the GNU
# `find -printf` used by the rotation logic in backup.sh.
# --no-cache avoids leaving the apk index in the image layer.
RUN apk add --no-cache apk-cron mysql-client findutils

# Copy cron job file to the cron.d directory
COPY backup-cron-job /etc/cron.d/backup-cron-job
RUN chmod 0644 /etc/cron.d/backup-cron-job

# Copy backup script. The scripts are invoked by cron via `sh <script>`,
# so read permission (0644) is sufficient — no execute bit needed.
COPY backup.sh /etc/cron.d/backup.sh
RUN chmod 0644 /etc/cron.d/backup.sh

# Copy restore script
COPY restore.sh /etc/cron.d/restore.sh
RUN chmod 0644 /etc/cron.d/restore.sh

# Register the cron job with root's crontab
RUN crontab /etc/cron.d/backup-cron-job

# Create the log file up front so tail can follow it immediately
RUN touch /var/log/cron.log

# Start the cron daemon, then keep the container in the foreground by
# tailing the log (exec form so docker stop signals are delivered to sh).
CMD ["sh", "-c", "crond && tail -f /var/log/cron.log"]
\ No newline at end of file
# Run the backup script every day at midnight (00:00)
0 0 * * * sh /etc/cron.d/backup.sh
\ No newline at end of file
#!/bin/bash
# Nightly MySQL backup: dump the database into a timestamped directory
# under /backup, then prune old backups so only the most recent
# NUMBER_TO_KEEP remain. MYSQL_ROOT_PASSWORD and MYSQL_DATABASE are
# expected in the environment (injected by the container).
DATE=$(date +"%Y-%m-%d-%H-%M-%S")
BACKUP_DIRECTORY="/backup"
DIRECTORIES_NAME_PATTERN='ecolyo-agent-20*'
NUMBER_TO_KEEP=14
BACKUP="${BACKUP_DIRECTORY}/ecolyo-agent-${DATE}"

# Create a new directory in the backup location for this run
mkdir -p "$BACKUP"

# Dump the database
mysqldump --host=database-agent --port=3306 -u root -p"$MYSQL_ROOT_PASSWORD" "$MYSQL_DATABASE" >"$BACKUP/$MYSQL_DATABASE.sql"

# Remove all but the last NUMBER_TO_KEEP saves, oldest first.
# Fix: the original referenced the undefined $BACKUP_DIR here, so the
# glob expanded from / instead of /backup — use $BACKUP_DIRECTORY.
# -name must be quoted so find (not the shell) matches the pattern.
find "$BACKUP_DIRECTORY"/* -maxdepth 1 -type d -name "$DIRECTORIES_NAME_PATTERN" -printf '%T@\t%p\n' |
    sort -t $'\t' -g |
    head -n -"$NUMBER_TO_KEEP" |
    cut -d $'\t' -f 2- |
    xargs -r rm -Rf
#!/bin/bash
# Restore a backup produced by backup.sh.
# Usage: restore.sh <backup-directory-name>
#   e.g. restore.sh ecolyo-agent-2022-01-01-00-00-00
# MYSQL_ROOT_PASSWORD and MYSQL_DATABASE come from the container env.
BACKUP_DIR="/backup"

# Fail fast when no backup name is given instead of piping a bogus
# "/backup//<db>.sql" path into mysql.
if [ -z "$1" ]; then
    echo "Usage: $0 <backup-directory-name>" >&2
    exit 1
fi

mysql --host=database-agent --port=3306 -u root -p"$MYSQL_ROOT_PASSWORD" "$MYSQL_DATABASE" <"$BACKUP_DIR/$1/$MYSQL_DATABASE.sql"
LOAD DATA LOCAL INFILE '/fluidprices.CSV' INTO TABLE prices LOAD DATA LOCAL INFILE '/var/lib/mysql/fluidprices.csv' INTO TABLE prices
FIELDS TERMINATED BY ',' FIELDS TERMINATED BY ','
LINES TERMINATED BY '\n' LINES TERMINATED BY '\n'
IGNORE 1 ROWS; IGNORE 1 ROWS;
\ No newline at end of file
File moved
...@@ -23,3 +23,25 @@ docker exec -it <container-id> bash ...@@ -23,3 +23,25 @@ docker exec -it <container-id> bash
mysql --local-infile=1 -uroot -p backoffice < /dbinit/dbinit.sql mysql --local-infile=1 -uroot -p backoffice < /dbinit/dbinit.sql
``` ```
## OpenShift
- Connect to the database pod via the terminal on the console
- Go to /var/lib/mysql
```
cd /var/lib/mysql
```
- Import the two files with curl (you can use temporary hosting solutions like tmpfiles.org)
```
curl --output dbinit.sql https://tmpfiles.org/dl/80799/dbinit.sql
curl --output fluidprices.csv https://tmpfiles.org/dl/80799/fluidprices.csv
```
- Execute dbinit.sql script (the password is the cluster secret named "ecolyo-agent-database")
```
mysql --local-infile=1 -uroot -p ecolyo-agent-database < dbinit.sql
```
\ No newline at end of file
...@@ -9,7 +9,7 @@ services: ...@@ -9,7 +9,7 @@ services:
timeout: 10s timeout: 10s
retries: 60 retries: 60
volumes: volumes:
- ./dbinit:/dbinit - ./db_data:/var/lib/mysql
networks: networks:
- ecolyo-agent-network - ecolyo-agent-network
ports: ports:
...@@ -19,7 +19,7 @@ services: ...@@ -19,7 +19,7 @@ services:
MYSQL_DATABASE: ${DATABASE_NAME} MYSQL_DATABASE: ${DATABASE_NAME}
meilisearch: meilisearch:
image: getmeili/meilisearch:v0.27.2 image: getmeili/meilisearch:v0.28.1
healthcheck: healthcheck:
test: ["CMD", "curl", "-f", "http://0.0.0.0:7700"] test: ["CMD", "curl", "-f", "http://0.0.0.0:7700"]
interval: 10s interval: 10s
...@@ -46,46 +46,9 @@ services: ...@@ -46,46 +46,9 @@ services:
environment: environment:
PMA_HOST: database PMA_HOST: database
backoffice-container:
image: backoffice
depends_on:
database:
condition: service_healthy
meilisearch:
condition: service_healthy
build: .
restart: unless-stopped
volumes:
- /etc/localtime:/etc/localtime:ro
- ./configs:/app/configs
- ./letsencrypt_cache:/app/letsencrypt_cache
- ./data:/app/data
- ./../${IMAGE_FOLDER}:/app/${IMAGE_FOLDER}
networks:
- ecolyo-agent-network
ports:
- ${HTTPS_PORT}:${HTTPS_PORT}
- 8090:8090
environment:
- HOSTNAME=${HOSTNAME}
- HTTPS_PORT=${HTTPS_PORT}
- ADMIN_ROLE=${ADMIN_ROLE}
- REDIRECT_URL=${REDIRECT_URL}
- IMAGE_FOLDER=${IMAGE_FOLDER}
- CLIENT_ID=${CLIENT_ID}
- CLIENT_SECRET=${CLIENT_SECRET}
- AUTH_URL=${AUTH_URL}
- TOKEN_URL=${TOKEN_URL}
- USERINFO_URL=${USERINFO_URL}
- DEBUG_MODE=${DEBUG_MODE}
- MOCK_OAUTH2=${MOCK_OAUTH2}
- DATABASE_USER=${DATABASE_USER}
- DATABASE_NAME=${DATABASE_NAME}
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
- DATABASE_HOST=database
volumes: volumes:
db_data: db_data:
meili_data:
networks: networks:
ecolyo-agent-network: ecolyo-agent-network:
\ No newline at end of file
...@@ -56,7 +56,7 @@ func TestMain(m *testing.M) { ...@@ -56,7 +56,7 @@ func TestMain(m *testing.M) {
// Setup the token manager to use debug mode // Setup the token manager to use debug mode
os.Setenv("DEBUG_MODE", "true") os.Setenv("DEBUG_MODE", "true")
tokens.Init("../configs/tokenskey.json", true) tokens.Init("../mnt/configs/tokenskey.json", true)
// Convert example objects to string // Convert example objects to string
mailSubjectBytes, _ := json.Marshal(mailSubject) mailSubjectBytes, _ := json.Marshal(mailSubject)
......
## Connexion à la machine de rebond
### Connexion au bureau à distance
Possible d'utiliser Remmina sous Linux
Configuration:
- serveur : kubernetes.synaaps.com:33338
- nom d'utilisateur : paas338.<pnom> (ex: paas338.rpailharey)
- mot de passe : fourni par SMS + possibilité de le changer sur https://safeweb.synaaps.com/cloud/?orgname=synaaps&domainid=2
### Installer VS Code sur la machine
- Suivre les étapes d'installation sur https://code.visualstudio.com/download
### Accès à la console OpenShift
- Se rendre sur https://console-openshift-console.apps.dedicated-0003.kaas.synaaps.intra
- Se connecter (mêmes identifiants que ceux du bureau à distance)
- Pour pouvoir lancer les commandes OpenShift (ex: oc apply) depuis un terminal
- Se rendre sur https://oauth-openshift.apps.dedicated-0003.kaas.synaaps.intra/oauth/token/request
- Se connecter avec les identifiants précédents
- Cliquer sur "Display login"
- Recopier la commande `oc login --token=YOUR_TOKEN --server=https://api-dedicated-0003.kaas.synaaps.intra:6443`
- Lancer dans un terminal
## Création des secrets
### Accès au repo de la forge
- Depuis la console Web, se rendre dans la section "Workloads > Secrets"
- Cliquer sur le bouton bleu "Create" puis "Image pull secret"
- Donner les informations :
- Secret name : llle-project
- Authentification type : Image registry credentials
- Registry server address : registry.forge.grandlyon.com
- Username: llle-project
- Password: <demander le password>
- Cliquer sur Create
### Secrets de la database
- Depuis VS Code, créer un fichier ecolyo-agent-database.yml
- Copier le contenu du fichier k8s/secrets/ecolyo-agent-database.yml dedans
- Remplacer les valeurs de "host", "name" et "password"
- Lancer `oc apply -f ecolyo-agent-database.yml`
### Secrets des APIs SGE
- Depuis VS Code, créer un fichier sge-api.yml
- Copier le contenu du fichier k8s/secrets/sge-api.yml dedans
- Remplacer la valeur de "token"
- Lancer `oc apply -f sge-api.yml`
### Secrets de Meilisearch
- Depuis VS Code, créer un fichier meilisearch.yml
- Copier le contenu du fichier k8s/secrets/meilisearch.yml dedans
- Remplacer la valeur de "master-key"
- Lancer `oc apply -f meilisearch.yml`
## Création des volumes
- Demander la création des Persistent Volumes (PV) de façon à obtenir la configuration suivante (contacter un administrateur du cluster) :
| Name | Capacity |
| ------ | ------ |
| pvc-1-ns-selfdata-d01-syn-claim | 1 GiB |
| pvc-2-ns-selfdata-d01-syn-claim | 500 MiB |
| pvc-3-ns-selfdata-d01-syn-claim | 2 GiB |
## Création des déploiements
### Déployer la base de données MySQL
- Depuis VS Code, créer un fichier ecolyo-agent-database-deployment.yml
- Copier le contenu du fichier k8s/deployments/ecolyo-agent-database-deployment.yml dedans
- Lancer `oc apply -f ecolyo-agent-database-deployment.yml`
### Créer le service de la base de données MySQL
- Depuis VS Code, créer un fichier ecolyo-agent-database-service.yml
- Copier le contenu du fichier k8s/services/ecolyo-agent-database-service.yml dedans
- Lancer `oc apply -f ecolyo-agent-database-service.yml`
### Déployer la base de données Meilisearch
- Depuis VS Code, créer un fichier ecolyo-agent-meilisearch-deployment.yml
- Copier le contenu du fichier k8s/deployments/ecolyo-agent-meilisearch-deployment.yml dedans
- Lancer `oc apply -f ecolyo-agent-meilisearch-deployment.yml`
### Créer le service de la base de données Meilisearch
- Depuis VS Code, créer un fichier ecolyo-agent-meilisearch-service.yml
- Copier le contenu du fichier k8s/services/ecolyo-agent-meilisearch-service.yml dedans
- Lancer `oc apply -f ecolyo-agent-meilisearch-service.yml`
### Déployer le serveur d'Ecolyo Agent
- Depuis VS Code, créer un fichier ecolyo-agent-server-deployment.yml
- Copier le contenu du fichier k8s/deployments/ecolyo-agent-server-deployment.yml dedans
- Lancer `oc apply -f ecolyo-agent-server-deployment.yml`
### Créer le service du serveur d'Ecolyo Agent
- Depuis VS Code, créer un fichier ecolyo-agent-server-service.yml
- Copier le contenu du fichier k8s/services/ecolyo-agent-server-service.yml dedans
- Lancer `oc apply -f ecolyo-agent-server-service.yml`
### Déployer le client d'Ecolyo Agent
- Depuis VS Code, créer un fichier ecolyo-agent-client-deployment.yml
- Copier le contenu du fichier k8s/deployments/ecolyo-agent-client-deployment.yml dedans
- Lancer `oc apply -f ecolyo-agent-client-deployment.yml`
### Créer le service du client d'Ecolyo Agent
- Depuis VS Code, créer un fichier ecolyo-agent-client-service.yml
- Copier le contenu du fichier k8s/services/ecolyo-agent-client-service.yml dedans
- Lancer `oc apply -f ecolyo-agent-client-service.yml`
### Créer la route publique d'accès au client d'Ecolyo Agent
- Extraire les certificats TLS avec la commande `oc extract secret/gl-cert -n default --confirm | Out-Null`
- Lancer la commande `oc create route edge ecolyo-agent --namespace=ns-selfdata-d01-syn --port=8080 --service=ecolyo-agent-client-service --cert=tls.crt --key=tls.key --hostname=ecolyo-agent.apps.grandlyon.com`
## Regénérer et initialiser les assets
- Depuis la forge, se rendre sur le projet backoffice-server, dans la section CI/CD et lancer la pipeline sur la branche master
- Lancer manuellement le job import-convert-assets et noter son JOB_ID présent dans l'URL
- Depuis la console OpenShift, se connecter au terminal du pod ecolyo-agent-server
- Aller dans le dossier du volume "mnt" avec la commande `cd /app/mnt`
- Télécharger les assets avec la commande `curl --output image-lib.zip --header "PRIVATE-TOKEN: <BO_SERVER_API_ACCESS_TOKEN>" https://forge.grandlyon.com/api/v4/projects/621/jobs/<JOB_ID>/artifacts`
- Décompresser l'archive avec la commande `unzip -o image-lib.zip`
## Initialiser la BDD MySQL
- Suivre les étapes décrites dans le fichier [init.md](/dbinit/init.md)
\ No newline at end of file
---
# Deployment for the Ecolyo Agent front-end (back-office client).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: ecolyo-agent-client
  namespace: ns-selfdata-d01-syn
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ecolyo-agent-client
  template:
    metadata:
      labels:
        app: ecolyo-agent-client
    spec:
      containers:
        - name: ecolyo-agent-client
          # Always pull so the mutable :master tag tracks the latest CI build
          image: registry.forge.grandlyon.com/web-et-numerique/llle_project/backoffice-client:master
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
              protocol: TCP
      # Only schedule on worker nodes
      nodeSelector:
        node-role.kubernetes.io/worker: ''
      # Credentials for the private forge registry (see "Création des secrets")
      imagePullSecrets:
        - name: llle-project
# Deployment for the MySQL database (generated by kompose, then adapted).
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    kompose.cmd: kompose convert
    kompose.version: 1.26.1 (a9d05d509)
  creationTimestamp: null
  labels:
    io.kompose.service: ecolyo-agent-database
  name: ecolyo-agent-database
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: ecolyo-agent-database
  strategy:
    # Recreate: never run two MySQL replicas over the same data directory
    type: Recreate
  template:
    metadata:
      annotations:
        kompose.cmd: kompose convert
        kompose.version: 1.26.1 (a9d05d509)
      creationTimestamp: null
      labels:
        io.kompose.network/ecolyo-agent-network: "true"
        io.kompose.service: ecolyo-agent-database
    spec:
      containers:
        - env:
            - name: MYSQL_DATABASE
              value: ecolyo-agent-database
            # Fix: pull the root password from the cluster secret instead of
            # committing it in clear text, consistent with how the server
            # deployment reads the same "ecolyo-agent-database" secret.
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: ecolyo-agent-database
                  key: password
          image: mysql:5
          livenessProbe:
            exec:
              # Fix: each exec `command` entry is one argv element, so the
              # original single-string form was looked up as a non-existent
              # executable and $MYSQL_ROOT_PASSWORD was never expanded.
              # Run the probe through a shell instead.
              command:
                - sh
                - -c
                - mysqladmin ping -h 127.0.0.1 -u root --password=$MYSQL_ROOT_PASSWORD
            failureThreshold: 60
            periodSeconds: 5
            timeoutSeconds: 10
          name: ecolyo-agent-database
          ports:
            - containerPort: 3306
          resources: {}
          # NOTE(review): no volumeMount for /var/lib/mysql — data is lost on
          # pod restart. Confirm whether a PVC (e.g. pvc-3-...-claim from the
          # volumes table) should be mounted here.
      restartPolicy: Always
status: {}
# Deployment for the Meilisearch index (generated by kompose, then adapted).
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    kompose.cmd: kompose convert
    kompose.version: 1.26.1 (a9d05d509)
  creationTimestamp: null
  labels:
    io.kompose.service: ecolyo-agent-meilisearch
  name: ecolyo-agent-meilisearch
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: ecolyo-agent-meilisearch
  strategy:
    type: Recreate
  template:
    metadata:
      annotations:
        kompose.cmd: kompose convert
        kompose.version: 1.26.1 (a9d05d509)
      creationTimestamp: null
      labels:
        io.kompose.network/ecolyo-agent-network: "true"
        io.kompose.service: ecolyo-agent-meilisearch
    spec:
      containers:
        - env:
            # Master key comes from the "meilisearch" secret (see k8s/secrets)
            - name: MEILI_MASTER_KEY
              valueFrom:
                secretKeyRef:
                  name: meilisearch
                  key: master-key
          image: getmeili/meilisearch:v0.28.1
          livenessProbe:
            exec:
              command:
                - curl
                - -f
                - http://0.0.0.0:7700
            failureThreshold: 3
            periodSeconds: 10
            timeoutSeconds: 10
          name: ecolyo-agent-meilisearch
          ports:
            - containerPort: 7700
          resources: {}
          volumeMounts:
            # Persist the search index across pod restarts
            - mountPath: /meili_data
              name: pvc-1-ns-selfdata-d01-syn-claim
      restartPolicy: Always
      volumes:
        - name: pvc-1-ns-selfdata-d01-syn-claim
          persistentVolumeClaim:
            claimName: pvc-1-ns-selfdata-d01-syn-claim
status: {}
# Deployment for the Ecolyo Agent back-office server
# (generated by kompose, then adapted for OpenShift).
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    kompose.cmd: kompose convert
    kompose.version: 1.26.1 (a9d05d509)
  creationTimestamp: null
  labels:
    io.kompose.service: ecolyo-agent-server
  name: ecolyo-agent-server
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: ecolyo-agent-server
  strategy:
    type: Recreate
  template:
    metadata:
      annotations:
        kompose.cmd: kompose convert
        kompose.version: 1.26.1 (a9d05d509)
      creationTimestamp: null
      labels:
        io.kompose.network/ecolyo-agent-network: "true"
        io.kompose.service: ecolyo-agent-server
    spec:
      volumes:
        # Holds /app/mnt (configs + image library, see deployment README)
        - name: pvc-2-ns-selfdata-d01-syn-claim
          persistentVolumeClaim:
            claimName: pvc-2-ns-selfdata-d01-syn-claim
      containers:
        - env:
            # NOTE(review): the entries below carry no value/valueFrom and so
            # resolve to empty strings — confirm they are intentionally left
            # to be patched at deploy time.
            - name: ADMIN_ROLE
            - name: AUTH_URL
            - name: CLIENT_ID
            - name: CLIENT_SECRET
            - name: HOSTNAME
            - name: DEBUG_MODE
            - name: HTTPS_PORT
            - name: IMAGE_FOLDER
            - name: MOCK_OAUTH2
            - name: REDIRECT_URL
            - name: TOKEN_URL
            - name: USERINFO_URL
            - name: MEILI_HOST
              value: 'http://ecolyo-agent-meilisearch-service:7700'
            # Database coordinates and credentials come from the
            # "ecolyo-agent-database" cluster secret
            - name: DATABASE_HOST
              valueFrom:
                secretKeyRef:
                  name: ecolyo-agent-database
                  key: host
            - name: DATABASE_NAME
              valueFrom:
                secretKeyRef:
                  name: ecolyo-agent-database
                  key: name
            - name: DATABASE_USER
              value: root
            - name: DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: ecolyo-agent-database
                  key: password
            - name: SGE_API_TOKEN
              valueFrom:
                secretKeyRef:
                  name: sge-api
                  key: token
            - name: MEILI_MASTER_KEY
              valueFrom:
                secretKeyRef:
                  name: meilisearch
                  key: master-key
          # Always pull so the mutable :master tag tracks the latest CI build
          image: registry.forge.grandlyon.com/web-et-numerique/llle_project/backoffice-server:master
          imagePullPolicy: Always
          volumeMounts:
            - name: pvc-2-ns-selfdata-d01-syn-claim
              mountPath: /app/mnt
          name: ecolyo-agent-server
          ports:
            - containerPort: 1443
            - containerPort: 8090
          resources: {}
      # Credentials for the private forge registry
      imagePullSecrets:
        - name: llle-project
      restartPolicy: Always
status: {}
# Template for the database connection secret.
# The values below are placeholders: replace host/name/password with the
# real values before `oc apply` (see deployment README) and never commit
# the real credentials to the repository.
kind: Secret
apiVersion: v1
metadata:
  name: ecolyo-agent-database
  namespace: ns-selfdata-d01-syn
stringData:
  host: DATABASE_HOST
  name: DATABASE_NAME
  password: DATABASE_PASSWORD
type: Opaque
# Template for the Meilisearch master-key secret.
# MASTER_KEY is a placeholder: set the real key before `oc apply`
# (see deployment README) and never commit the real value.
kind: Secret
apiVersion: v1
metadata:
  name: meilisearch
  namespace: ns-selfdata-d01-syn
stringData:
  master-key: MASTER_KEY
type: Opaque
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment