To mitigate brute-force attacks against GitLab accounts, we are moving to SWITCH edu-ID logins only. We would like to remind you to link your account with your edu-ID. After November 30, 2021, login will be possible only via edu-ID. Here you can find the instructions for linking your account.

If you don't have a SWITCH edu-ID yet, you can create one by following this guide.

kind regards

This Server has been upgraded to GitLab release 14.2.6

Verified Commit 49ccb5da authored by Sebastian Schüpbach's avatar Sebastian Schüpbach
Browse files

move to helm

parent 4c0a1d22
Pipeline #27725 passed with stages
in 5 minutes and 6 seconds
......@@ -13,4 +13,5 @@ include:
- project: 'memoriav/memobase-2020/utilities/ci-templates'
ref: modular
file: 'docker/default.yml'
- project: 'memoriav/memobase-2020/utilities/ci-templates'
file: 'helm-chart/helm-chart.yml'
# .helmignore — files and patterns excluded when packaging the Helm chart.
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
\ No newline at end of file
# Chart.yaml — Helm chart metadata for the record deletion marker job.
apiVersion: v2
name: dd-marker-prod
description: A helm chart for the record deletion marker
type: application
# version/appVersion are placeholders; presumably stamped by CI at release
# time — TODO confirm against the release pipeline.
version: 0.0.0
appVersion: 0.0.0
maintainers:
  # "email" must be nested under the list item; the flat layout in the
  # source did not parse as part of the maintainer entry.
  - name: Sebastian Schüpbach
    email: sebastian.schuepbach@unibas.ch
# Environment-specific overrides for the prod deployment of the dd-marker job.
tag: "latest"
k8sEnvironment: prod
# Quoted so the CPU fractions stay strings, not floats.
k8sRequestsCpu: "0.2"
k8sRequestsMemory: "128Mi"
k8sLimitsCpu: "0.5"
k8sLimitsMemory: "512Mi"
# Customise to match your needs; <...> values are placeholders.
jobArgs: [
  "--record-set-filter", "<id>",
  "--record-filter", "<id>",
  "--institution-filter", "<id>",
  "--session-filter", "<id>",
  "--created-after", "<datetime>",
  # A comma was missing after the next entry, which made the whole flow
  # sequence invalid YAML.
  "--created-before", "<datetime>",
  "<your-session-id>"
]
kafkaConfigs: prod-kafka-bootstrap-servers
inputTopicName: mb-di-transactions-records-prod
outputTopicName: mb-dd-deletes-prod
clientId: dd-marker-prod
pollTimeout: "20000"
# Environment-specific overrides for the stage deployment of the dd-marker job.
tag: "latest"
k8sEnvironment: stage
# Quoted so the CPU fractions stay strings, not floats.
k8sRequestsCpu: "0.2"
k8sRequestsMemory: "128Mi"
k8sLimitsCpu: "0.5"
k8sLimitsMemory: "512Mi"
# Customise to match your needs; <...> values are placeholders.
jobArgs: [
  "--record-set-filter", "<id>",
  "--record-filter", "<id>",
  "--institution-filter", "<id>",
  "--session-filter", "<id>",
  "--created-after", "<datetime>",
  # A comma was missing after the next entry, which made the whole flow
  # sequence invalid YAML.
  "--created-before", "<datetime>",
  "<your-session-id>"
]
kafkaConfigs: stage-kafka-bootstrap-servers
inputTopicName: mb-di-transactions-records-stage
outputTopicName: mb-dd-deletes-stage
clientId: dd-marker-stage
pollTimeout: "20000"
# Environment-specific overrides for the test deployment of the dd-marker job.
tag: "latest"
k8sEnvironment: test
# Quoted so the CPU fractions stay strings, not floats.
k8sRequestsCpu: "0.2"
k8sRequestsMemory: "128Mi"
k8sLimitsCpu: "0.5"
k8sLimitsMemory: "512Mi"
# Customise to match your needs; <...> values are placeholders.
jobArgs: [
  "--record-set-filter", "<id>",
  "--record-filter", "<id>",
  "--institution-filter", "<id>",
  "--session-filter", "<id>",
  "--created-after", "<datetime>",
  # A comma was missing after the next entry, which made the whole flow
  # sequence invalid YAML.
  "--created-before", "<datetime>",
  "<your-session-id>"
]
kafkaConfigs: test-kafka-bootstrap-servers
# NOTE(review): the test environment reads/writes the *prod* topics while
# stage uses its own — looks like a copy-paste slip; confirm whether these
# should be mb-di-transactions-records-test / mb-dd-deletes-test.
inputTopicName: mb-di-transactions-records-prod
outputTopicName: mb-dd-deletes-prod
clientId: dd-marker-test
pollTimeout: "20000"
# ConfigMap template: exposes the chart's runtime settings to the Job
# container via envFrom (see the Job template).
apiVersion: v1
kind: ConfigMap
metadata:
  name: "{{ .Values.k8sGroupId }}-{{ .Values.k8sName }}-{{ .Values.k8sEnvironment }}-config"
  namespace: "{{ .Values.k8sNamespace }}"
data:
  # All values are quoted: ConfigMap data must be strings.
  CLIENT_ID: "{{ .Values.clientId }}"
  TOPIC_IN: "{{ .Values.inputTopicName }}"
  TOPIC_OUT: "{{ .Values.outputTopicName }}"
  POLL_TIMEOUT: "{{ .Values.pollTimeout }}"
# Job template: runs the deletion-marker container once per invocation.
apiVersion: batch/v1
kind: Job
metadata:
  name: "{{ .Values.k8sGroupId }}-{{ .Values.k8sName }}-{{ .Values.k8sEnvironment }}"
  namespace: "{{ .Values.k8sNamespace }}"
  labels:
    app: "{{ .Values.k8sGroupId }}-{{ .Values.k8sName }}-{{ .Values.k8sEnvironment }}-app"
spec:
  template:
    metadata:
      labels:
        app: "{{ .Values.k8sGroupId }}-{{ .Values.k8sName }}-{{ .Values.k8sEnvironment }}-app"
        group: "{{ .Values.k8sGroupName }}"
        environment: "{{ .Values.k8sEnvironment }}"
    spec:
      containers:
        - name: "{{ .Values.k8sGroupId }}-{{ .Values.k8sName }}-{{ .Values.k8sEnvironment }}-container"
          image: "{{ .Values.registry }}/{{ .Values.image }}:{{ .Values.tag }}"
          imagePullPolicy: Always
          # toJson renders the list in valid flow syntax. A bare
          # {{ .Values.jobArgs }} would print Go's "[a b c]" form,
          # which does not parse as YAML.
          args: {{ toJson .Values.jobArgs }}
          resources:
            requests:
              cpu: "{{ .Values.k8sRequestsCpu }}"
              memory: "{{ .Values.k8sRequestsMemory }}"
            limits:
              cpu: "{{ .Values.k8sLimitsCpu }}"
              memory: "{{ .Values.k8sLimitsMemory }}"
          envFrom:
            - configMapRef:
                name: "{{ .Values.k8sGroupId }}-{{ .Values.k8sName }}-{{ .Values.k8sEnvironment }}-config"
            - configMapRef:
                name: "{{ .Values.kafkaConfigs }}"
      # Jobs must not restart in-place; a failed pod is retried via backoffLimit.
      restartPolicy: Never
  backoffLimit: 1
# Chart-wide defaults. "placeholder" values are overridden per environment
# (see the prod/stage/test values files) or injected dynamically.
registry: "cr.gitlab.switch.ch"
# NOTE(review): image name here ends in "import-process-deleter" while the
# static manifests use "import-process-delete" — confirm which path exists
# in the registry.
image: "memoriav/memobase-2020/services/deletion-components/import-process-deleter"
tag: placeholder
k8sName: marker
k8sNamespace: memobase
k8sGroupId: dd
k8sGroupName: documents-delete
k8sEnvironment: placeholder
k8sRequestsCpu: placeholder
k8sRequestsMemory: placeholder
k8sLimitsCpu: placeholder
k8sLimitsMemory: placeholder
jobArgs: placeholder # Filled dynamically by Import API
kafkaConfigs: placeholder
inputTopicName: placeholder
outputTopicName: placeholder
clientId: placeholder
pollTimeout: placeholder
# Static prod Job manifest for the dd-marker deletion job.
apiVersion: batch/v1
kind: Job
metadata:
  name: dd-marker-prod
  namespace: memobase
  labels:
    app: dd-marker-prod-app
spec:
  template:
    metadata:
      labels:
        app: dd-marker-prod-app
        group: documents-delete
        environment: prod
    spec:
      containers:
        - name: dd-marker-prod-container
          # Customise to match your needs; <...> values are placeholders.
          # A comma was missing after "--created-before", "<datetime>",
          # which made the flow sequence invalid YAML.
          args: [
            "--record-set-filter", "<id>",
            "--record-filter", "<id>",
            "--institution-filter", "<id>",
            "--session-filter", "<id>",
            "--created-after", "<datetime>",
            "--created-before", "<datetime>",
            "<your-session-id>"
          ]
          image: cr.gitlab.switch.ch/memoriav/memobase-2020/services/deletion-components/import-process-delete:latest
          imagePullPolicy: Always
          envFrom:
            # configMapRef is a mapping, not a list: the original
            # "- name:" nesting did not match the EnvFromSource schema.
            - configMapRef:
                name: "prod-kafka-bootstrap-servers"
          env:
            - name: CLIENT_ID
              value: dd-marker-prod
            - name: TOPIC_IN
              value: mb-di-transactions-records-prod
            - name: TOPIC_OUT
              value: mb-dd-deletes-prod
            - name: POLL_TIMEOUT
              value: "20000"
      restartPolicy: Never
  backoffLimit: 1
# Static stage Job manifest for the dd-marker deletion job.
apiVersion: batch/v1
kind: Job
metadata:
  name: dd-marker-stage
  namespace: memobase
  labels:
    app: dd-marker-stage-app
spec:
  template:
    metadata:
      labels:
        app: dd-marker-stage-app
        group: documents-delete
        environment: stage
    spec:
      containers:
        - name: dd-marker-stage-container
          # Customise to match your needs; <...> values are placeholders.
          # A comma was missing after "--created-before", "<datetime>",
          # which made the flow sequence invalid YAML.
          args: [
            "--record-set-filter", "<id>",
            "--record-filter", "<id>",
            "--institution-filter", "<id>",
            "--session-filter", "<id>",
            "--created-after", "<datetime>",
            "--created-before", "<datetime>",
            "<your-session-id>"
          ]
          image: cr.gitlab.switch.ch/memoriav/memobase-2020/services/deletion-components/import-process-delete:latest
          imagePullPolicy: Always
          envFrom:
            # configMapRef is a mapping, not a list: the original
            # "- name:" nesting did not match the EnvFromSource schema.
            # NOTE(review): stage references the *prod* Kafka bootstrap
            # configmap, while the Helm stage values use
            # stage-kafka-bootstrap-servers — confirm which is intended.
            - configMapRef:
                name: "prod-kafka-bootstrap-servers"
          env:
            - name: CLIENT_ID
              value: dd-marker-stage
            - name: TOPIC_IN
              value: mb-di-transactions-records-stage
            - name: TOPIC_OUT
              value: mb-dd-deletes-stage
            - name: POLL_TIMEOUT
              value: "20000"
      restartPolicy: Never
  backoffLimit: 1
# Static test Job manifest for the dd-marker deletion job.
apiVersion: batch/v1
kind: Job
metadata:
  name: dd-marker-test
  namespace: memobase
  labels:
    app: dd-marker-test-app
spec:
  template:
    metadata:
      labels:
        app: dd-marker-test-app
        group: documents-delete
        environment: test
    spec:
      containers:
        - name: dd-marker-test-container
          # Customise to match your needs; <...> values are placeholders.
          # A comma was missing after "--created-before", "<datetime>",
          # which made the flow sequence invalid YAML.
          args: [
            "--record-set-filter", "<id>",
            "--record-filter", "<id>",
            "--institution-filter", "<id>",
            "--session-filter", "<id>",
            "--created-after", "<datetime>",
            "--created-before", "<datetime>",
            "<your-session-id>"
          ]
          image: cr.gitlab.switch.ch/memoriav/memobase-2020/services/deletion-components/import-process-delete:latest
          imagePullPolicy: Always
          envFrom:
            # configMapRef is a mapping, not a list: the original
            # "- name:" nesting did not match the EnvFromSource schema.
            - configMapRef:
                name: "test-kafka-bootstrap-servers"
          env:
            - name: CLIENT_ID
              value: dd-marker-test
            # NOTE(review): the test job reads/writes the *prod* topics —
            # confirm this is intentional and not a copy-paste slip.
            - name: TOPIC_IN
              value: mb-di-transactions-records-prod
            - name: TOPIC_OUT
              value: mb-dd-deletes-prod
            - name: POLL_TIMEOUT
              value: "20000"
      restartPolicy: Never
  backoffLimit: 1
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.