Helm Chart 迭代创建 pods , Containers , ports , service

Helm Chart iteratively create pods , Containers , ports , service

我有 4 个微服务,它们的名称、镜像、容器端口和服务端口各不相同。下面这段代码来自 Stack Overflow 上的一个答案,它目前可以按各自的名称和镜像创建 4 个 Deployment,但我无法为各个容器配置各自的端口和资源。我的主要目标是创建一个主模板:只需填入少量值,就能为新的微服务生成全部清单,而不用为每个微服务单独维护一堆清单文件。

deployment.yaml
{{- /*
Renders one Deployment per service name listed in .Values.componentTests.
Inside the range, `.` is the current service name; `$` reaches the chart
root scope ($.Values, $.Chart, $.Release).
Fix: `$.Values.high.resources` is a map — ranging over it with an empty
body emitted nothing, which is why the rendered output showed an empty
`resources:` key. `toYaml … | nindent 12` serializes the whole map at the
correct indentation instead.
*/}}
{{ if .Values.componentTests }}
{{- range .Values.componentTests }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ . }}
  labels:
    environment: {{ $.Values.environment }}
    app: {{ . }}
    aadpodidbinding: podid-{{ . }}
    chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }}
    release: {{ $.Release.Name }}
    heritage: {{ $.Release.Service }}
spec:
  replicas: {{ $.Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ . }}
  template:
    metadata:
      labels:
        app: {{ . }}
    spec:
      nodeSelector:
        # beta.kubernetes.io/os was removed in Kubernetes 1.24; the stable
        # kubernetes.io/os label has been available since 1.14.
        "kubernetes.io/os": linux
      containers:
        - name: {{ . }}
          image: mycr.azurecr.io/master/{{ . }}:{{ $.Values.image.tag }}
          imagePullPolicy: {{ $.Values.image.pullPolicy }}
          resources:
            {{- toYaml $.Values.high.resources | nindent 12 }}
{{- end }}
{{ end }}
values.yaml

replicaCount: 1

image:
#   repository: nginx
  pullPolicy: IfNotPresent
#   # Overrides the image tag whose default is the chart appVersion.
  tag: "latest"

# Service names the deployment template iterates over — one Deployment each.
componentTests:
  - service01
  - service02
  - service03
  - service04

environment: QA
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

# NOTE(review): lists only three of the four services above — confirm
# whether service04 is missing deliberately.
services:
  - service01
  - service02
  - service03

# serviceAccount:
#   # Specifies whether a service account should be created
#   create: true
#   # Annotations to add to the service account
#   annotations: {}
#   # The name of the service account to use.
#   # If not set and create is true, a name is generated using the fullname template
#   name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# service:
#   type: ClusterIP
#   port: 80

# ingress:
#   enabled: false
#   annotations: {}
#     # kubernetes.io/ingress.class: nginx
#     # kubernetes.io/tls-acme: "true"
#   hosts:
#     - host: chart-example.local
#       paths: []
#   tls: []
#   #  - secretName: chart-example-tls
#   #    hosts:
#   #      - chart-example.local

# Three resource tiers; the deployment template currently renders the
# `high` tier for every service.
high:
  resources:
    requests:
      cpu: 350m
      memory: 800Mi
    limits:
      cpu: 400m
      memory: 850Mi
medium:
  resources:
    requests:
      cpu: 200m
      memory: 650Mi
    limits:
      cpu: 250m
      memory: 700Mi
low:
  resources:
    requests:
      cpu: 100m
      memory: 500Mi
    limits:
      cpu: 150m
      memory: 550Mi

autoscaling:
  enabled: false
  minReplicas: 2
  maxReplicas: 4
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

tolerations: []
affinity: {}
output

MANIFEST:
---
# Source: test/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service01
  labels:
    environment: QA
    app: service01
    aadpodidbinding: podid-service01
    chart: test-0.1.1
    release: api
    heritage: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service01
  template:
    metadata:
      labels:
        app: service01
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
        - name: service01
          image: mycr.azurecr.io/master/service01:latest
          imagePullPolicy: IfNotPresent
          resources:
---
# Source: test/templates/deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service02
  labels:
    environment: QA
    app: service02
    aadpodidbinding: podid-service02
    chart: test-0.1.1
    release: api
    heritage: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service02
  template:
    metadata:
      labels:
        app: service02
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
        - name: service02
          image: mycr.azurecr.io/master/service02:latest
          imagePullPolicy: IfNotPresent
          resources:
---
# Source: test/templates/deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service02-ui
  labels:
    environment: QA
    app: service02-ui
    aadpodidbinding: podid-service02-ui
    chart: test-0.1.1
    release: api
    heritage: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service02-ui
  template:
    metadata:
      labels:
        app: service02-ui
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
        - name: service02-ui
          image: mycr.azurecr.io/master/service02-ui:latest
          imagePullPolicy: IfNotPresent
          resources:
---
# Source: test/templates/deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service03
  labels:
    environment: QA
    app: service03
    aadpodidbinding: podid-service03
    chart: test-0.1.1
    release: api
    heritage: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service03
  template:
    metadata:
      labels:
        app: service03
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
        - name: service03
          image: mycr.azurecr.io/master/service03:latest
          imagePullPolicy: IfNotPresent
          resources:
---
# Source: test/templates/deployment.yaml
service01.yaml
# Hand-written Deployment for service01 — the manifest the chart is meant
# to reproduce for every service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service01
  labels:
    # Binds the pod to the Azure AD pod identity named podid-service01.
    aadpodidbinding: podid-service01
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service01
  template:
    metadata:
      labels:
        app: service01
        aadpodidbinding: podid-service01
      annotations:
        build: "2020102901"
    spec:
      nodeSelector:
        # NOTE(review): beta.kubernetes.io/os was removed in Kubernetes
        # 1.24 — prefer kubernetes.io/os; confirm cluster version.
        "beta.kubernetes.io/os": linux
      containers:
      - name: service01
        image: mycr.azurecr.io/master/service01:latest
        resources:
          requests:
            cpu: 250m
            memory: "700Mi"
          limits:
            # NOTE(review): no cpu limit is set here — confirm intentional.
            memory: "700Mi"
        ports:
        - containerPort: 7474
        env:
        - name: KEY_VAULT_ID
          value: "key-vault"
        - name: AZURE_ACCOUNT_NAME
          value: "storage"
        # Readiness and liveness probes hit the same Spring Boot actuator
        # health endpoint with identifying headers.
        readinessProbe:
          httpGet:
            path: /actuator/health
            port: 7474
            scheme: HTTP
            httpHeaders:
            - name: service-id
              value: root
            - name: request-id
              value: healthcheck
          initialDelaySeconds: 60
          periodSeconds: 30
        livenessProbe:
          httpGet:
            path: /actuator/health
            port: 7474
            scheme: HTTP
            httpHeaders:
            - name: service-id
              value: root
            - name: request-id
              value: healthcheck
          initialDelaySeconds: 60
          periodSeconds: 30
---
# Service exposing service01's main port inside the cluster
# (no `type:` given, so it defaults to ClusterIP).
apiVersion: v1
kind: Service
metadata:
  name: service01
spec:
  ports:
  - port: 7474
    name: main
#  - port: 9999
#    name: health
  selector:
    app: service01
---
# HorizontalPodAutoscaler: scales the service01 Deployment between 1 and 4
# replicas to keep average CPU utilization around 50%.
# autoscaling/v2beta2 was removed in Kubernetes 1.26; autoscaling/v2 (GA
# since 1.23) accepts the identical spec for this resource. Confirm the
# target cluster is on 1.23+ before applying.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: service01
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: service01
  minReplicas: 1
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50

我不清楚这个 Helm Chart 最初是在什么场景下创建的,但据我所知,componentTests 这类配置通常用于集成测试或类似用途,这恐怕不适合你的使用场景。更好的做法是让 Chart 只为单个服务生成清单,然后对所有服务复用同一个 Chart:也就是用不同的 values 文件执行多次 helm install,而不是用一次 helm install 创建所有服务。如果使用这种把所有服务塞进一个 Chart 的大 Chart,每次新增服务都需要修改 Chart 本身。

你将拥有:

helm install -f service01-values.yaml ./mychart
helm install -f service02-values.yaml ./mychart
helm install -f service03-values.yaml ./mychart
helm install -f service04-values.yaml ./mychart

而不是:

helm install -f values.yaml ./mychart

要做到这一点,您需要稍微更改图表并删除循环 {{- range .Values.componentTests }}。了解如何构建图表,比您想象的更简单:Create Your First Helm Chart