feat(kafka, pinot, star-rocks): update configurations and resource limits across multiple components

- Updated Kafka configuration to specify Kubernetes version and API versions.
- Enabled the Vertical Pod Autoscaler (VPA) for the Kafka controller and adjusted CPU and memory requests/limits for the Pinot controller, broker, and server (a condensed sketch of the Kafka override follows the commit metadata below).
- Added a values file for the Bitnami Apache Flink chart covering jobmanager and taskmanager resources, probes, and security contexts.
- Removed obsolete certificate configuration for Pinot.
- Enhanced StarRocks values.yaml with comprehensive configurations for deployment, including service specifications and resource requests/limits.
- Increased timeout settings in production values for Freeleaps to improve service resilience.

Signed-off-by: zhenyus <zhenyus@mathmast.com>
zhenyus 2025-06-26 23:04:03 +08:00
parent a3b3b3f12f
commit 9c07783780
12 changed files with 3544 additions and 673 deletions
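
In values terms, the Kafka change in this commit reduces to the fragment below. The paths are taken from the chart's own @param annotations in the diff; this is only a condensed sketch of the override, not the full file:

kubeVersion: "1.31.4"
apiVersions:
  - "autoscaling.k8s.io/v1"
controller:
  autoscaling:
    vpa:
      enabled: true
      minAllowed:
        cpu: 500m
        memory: 1024Mi
      maxAllowed:
        cpu: 1000m
        memory: 2048Mi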


@@ -0,0 +1,897 @@
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
defaultStorageClass: ""
storageClass: "azure-disk-std-lrs"
## Security parameters
##
security:
## @param global.security.allowInsecureImages Allows skipping image verification
allowInsecureImages: false
## Compatibility adaptations for Kubernetes platforms
##
compatibility:
## Compatibility adaptations for Openshift
##
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
##
adaptSecurityContext: auto
## @section Common parameters
##
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param commonLabels Labels to add to all deployed objects (sub-charts are not considered)
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Default Kubernetes cluster domain
##
clusterDomain: freeleaps.cluster
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the deployment
##
diagnosticMode:
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
##
enabled: false
## @param diagnosticMode.command Command to override all containers in the deployment
##
command:
- sleep
## @param diagnosticMode.args Args to override all containers in the deployment
##
args:
- infinity
## @section Apache Flink parameters
##
## Bitnami Apache Flink image
## ref: https://hub.docker.com/r/bitnami/flink/tags/
## @param image.registry [default: REGISTRY_NAME] Apache Flink image registry
## @param image.repository [default: REPOSITORY_NAME/flink] Apache Flink image repository
## @skip image.tag Apache Flink image tag (immutable tags are recommended)
## @param image.digest Apache Flink image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy image pull policy
## @param image.pullSecrets Apache Flink image pull secrets
## @param image.debug Enable image debug mode
##
image:
registry: docker.io
repository: bitnami/flink
tag: 2.0.0-debian-12-r7
digest: ""
## Specify an imagePullPolicy
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Enable debug mode
##
debug: false
## @section Jobmanager deployment parameters
##
jobmanager:
## @param jobmanager.command Command for running the container (set to default if not set). Use array form
##
command: []
## @param jobmanager.args Args for running the container (set to default if not set). Use array form
##
args: []
## @param jobmanager.lifecycleHooks [object] Override default container lifecycle hooks
##
lifecycleHooks: {}
## @param jobmanager.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param jobmanager.hostAliases Set pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param jobmanager.extraEnvVars Extra environment variables to be set on flink container
## For example:
## - name: FOO
## value: BAR
##
extraEnvVars: []
## @param jobmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param jobmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars
##
extraEnvVarsSecret: ""
## @param jobmanager.replicaCount Number of Apache Flink Jobmanager replicas
##
replicaCount: 1
## Configure extra options for container's liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param jobmanager.livenessProbe.enabled Enable livenessProbe on Jobmanager nodes
## @param jobmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param jobmanager.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param jobmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param jobmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param jobmanager.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
## @param jobmanager.startupProbe.enabled Enable startupProbe on Jobmanager containers
## @param jobmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param jobmanager.startupProbe.periodSeconds Period seconds for startupProbe
## @param jobmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param jobmanager.startupProbe.failureThreshold Failure threshold for startupProbe
## @param jobmanager.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: true
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param jobmanager.readinessProbe.enabled Enable readinessProbe
## @param jobmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param jobmanager.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param jobmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param jobmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param jobmanager.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param jobmanager.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param jobmanager.customStartupProbe [object] Override default startup probe
##
customStartupProbe: {}
## @param jobmanager.customReadinessProbe [object] Override default readiness probe
##
customReadinessProbe: {}
## Apache Flink pods' resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## Minimum memory for development is 4GB and 2 CPU cores
## Minimum memory for production is 8GB and 4 CPU cores
## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html
##
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param jobmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if jobmanager.resources is set (jobmanager.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param jobmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources:
requests:
cpu: 200m
memory: 1Gi
limits:
cpu: 500m
memory: 2Gi
## @param jobmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for flink container
##
extraVolumeMounts: []
## Container ports to expose
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
containerPorts:
## @param jobmanager.containerPorts.rpc Port for RPC
##
rpc: 6123
## @param jobmanager.containerPorts.http Port for http UI
##
http: 8081
## @param jobmanager.containerPorts.blob Port for blob server
##
blob: 6124
## Apache Flink jobmanager.service parameters
##
service:
## @param jobmanager.service.type Apache Flink service type
##
type: ClusterIP
## Ports to expose
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
ports:
## @param jobmanager.service.ports.rpc Port for RPC
##
rpc: 6123
## @param jobmanager.service.ports.http Port for http UI
##
http: 8081
## @param jobmanager.service.ports.blob Port for blob server
## Due to Apache Flink specifics, this port should match the jobmanager.containerPorts.blob port. The taskmanager must be
## able to reach the jobmanager on the port the jobmanager advertises to it, as the jobmanager is not aware of the service port.
blob: 6124
## Node ports to expose
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
nodePorts:
## @param jobmanager.service.nodePorts.rpc Node port for RPC
##
rpc: ""
## @param jobmanager.service.nodePorts.http Node port for http UI
##
http: ""
## @param jobmanager.service.nodePorts.blob Port for blob server
##
blob: ""
## @param jobmanager.service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
##
extraPorts: []
## @param jobmanager.service.loadBalancerIP LoadBalancerIP if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param jobmanager.service.loadBalancerSourceRanges Service Load Balancer sources
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param jobmanager.service.clusterIP Service Cluster IP
## e.g.:
## clusterIP: None
##
clusterIP: ""
## @param jobmanager.service.externalTrafficPolicy Service external traffic policy
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param jobmanager.service.annotations Provide any additional annotations which may be required.
## This can be used to set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
## @param jobmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param jobmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param jobmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param jobmanager.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param jobmanager.networkPolicy.allowExternalEgress Allow the pod to access any port range and all destinations.
##
allowExternalEgress: true
## @param jobmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param jobmanager.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param jobmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param jobmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Apache Flink Jobmanager serviceAccount parameters
##
serviceAccount:
## @param jobmanager.serviceAccount.create Enables ServiceAccount
##
create: true
## @param jobmanager.serviceAccount.name ServiceAccount name
##
name: ""
## @param jobmanager.serviceAccount.annotations Annotations to add to all deployed objects
##
annotations: {}
## @param jobmanager.serviceAccount.automountServiceAccountToken Automount API credentials for a service account.
##
automountServiceAccountToken: false
## Pod security context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param jobmanager.podSecurityContext.enabled Enabled Apache Flink pods' Security Context
## @param jobmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param jobmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param jobmanager.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param jobmanager.podSecurityContext.fsGroup Set Apache Flink pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Configure Container Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param jobmanager.containerSecurityContext.enabled Enabled Apache Flink containers' Security Context
## @param jobmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param jobmanager.containerSecurityContext.runAsUser Set Apache Flink container's Security Context runAsUser
## @param jobmanager.containerSecurityContext.runAsGroup Set Apache Flink container's Security Context runAsGroup
## @param jobmanager.containerSecurityContext.runAsNonRoot Force the container to be run as non root
## @param jobmanager.containerSecurityContext.allowPrivilegeEscalation Allows privilege escalation
## @param jobmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param jobmanager.containerSecurityContext.privileged Set primary container's Security Context privileged
## @param jobmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param jobmanager.containerSecurityContext.seccompProfile.type Rules specifying actions to take based on the requested syscall
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param jobmanager.podAnnotations Additional pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param jobmanager.podLabels Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param jobmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param jobmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param jobmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param jobmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
##
key: ""
## @param jobmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param jobmanager.priorityClassName Server priorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## @param jobmanager.affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param jobmanager.nodeSelector Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param jobmanager.tolerations Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param jobmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
## @param jobmanager.schedulerName Alternative scheduler
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param jobmanager.updateStrategy.type Apache Flink jobmanager deployment strategy type
## @param jobmanager.updateStrategy.rollingUpdate [object,nullable] Apache Flink jobmanager deployment rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
updateStrategy:
type: RollingUpdate
rollingUpdate: null
## @param jobmanager.extraVolumes Optionally specify extra list of additional volumes for flink container
##
extraVolumes: []
## @param jobmanager.initContainers Add additional init containers to the flink pods
##
initContainers: []
## @param jobmanager.sidecars Add additional sidecar containers to the flink pods
##
sidecars: []
## @param jobmanager.pdb.create Enable/disable a Pod Disruption Budget creation
## @param jobmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param jobmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `jobmanager.pdb.minAvailable` and `jobmanager.pdb.maxUnavailable` are empty.
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""
## @section TaskManager deployment parameters
##
taskmanager:
## @param taskmanager.command Command for running the container (set to default if not set). Use array form
##
command: []
## @param taskmanager.args Args for running the container (set to default if not set). Use array form
##
args: []
## @param taskmanager.lifecycleHooks [object] Override default container lifecycle hooks
##
lifecycleHooks: {}
## @param taskmanager.automountServiceAccountToken Mount Service Account token in pod
##
automountServiceAccountToken: false
## @param taskmanager.hostAliases Set pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param taskmanager.extraEnvVars Extra environment variables to be set on flink container
## For example:
## - name: FOO
## value: BAR
##
extraEnvVars: []
## @param taskmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param taskmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars
##
extraEnvVarsSecret: ""
## @param taskmanager.replicaCount Number of Apache Flink replicas
##
replicaCount: 1
## Configure extra options for container's liveness, readiness and startup probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param taskmanager.livenessProbe.enabled Enable livenessProbe on taskmanager nodes
## @param taskmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param taskmanager.livenessProbe.periodSeconds Period seconds for livenessProbe
## @param taskmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param taskmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param taskmanager.livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
## @param taskmanager.startupProbe.enabled Enable startupProbe on taskmanager containers
## @param taskmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
## @param taskmanager.startupProbe.periodSeconds Period seconds for startupProbe
## @param taskmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe
## @param taskmanager.startupProbe.failureThreshold Failure threshold for startupProbe
## @param taskmanager.startupProbe.successThreshold Success threshold for startupProbe
##
startupProbe:
enabled: true
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param taskmanager.readinessProbe.enabled Enable readinessProbe
## @param taskmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param taskmanager.readinessProbe.periodSeconds Period seconds for readinessProbe
## @param taskmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param taskmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param taskmanager.readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
## @param taskmanager.customLivenessProbe Custom livenessProbe that overrides the default one
##
customLivenessProbe: {}
## @param taskmanager.customStartupProbe [object] Override default startup probe
##
customStartupProbe: {}
## @param taskmanager.customReadinessProbe [object] Override default readiness probe
##
customReadinessProbe: {}
## Apache Flink pods' resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## Minimum memory for development is 4GB and 2 CPU cores
## Minimum memory for production is 8GB and 4 CPU cores
## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html
##
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param taskmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if taskmanager.resources is set (taskmanager.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "small"
## @param taskmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
## requests:
## cpu: 2
## memory: 512Mi
## limits:
## cpu: 3
## memory: 1024Mi
##
resources:
requests:
cpu: 200m
memory: 1Gi
limits:
cpu: 500m
memory: 2Gi
## @param taskmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for flink container
##
extraVolumeMounts: []
## Container ports to expose
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## @param taskmanager.containerPorts.data data exchange port
## @param taskmanager.containerPorts.rpc Port for RPC
## @param taskmanager.containerPorts.internalMetrics Port for internal metrics query service
##
containerPorts:
data: 6121
rpc: 6122
internalMetrics: 6126
## Apache Flink taskmanager.service parameters
##
service:
## @param taskmanager.service.type Apache Flink service type
##
type: ClusterIP
## Ports to expose
## @param taskmanager.service.ports.data data exchange port
## @param taskmanager.service.ports.rpc Port for RPC
## @param taskmanager.service.ports.internalMetrics Port for internal metrics query service
##
ports:
data: 6121
rpc: 6122
internalMetrics: 6126
## Node ports to expose
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## @param taskmanager.service.nodePorts.data data exchange port
## @param taskmanager.service.nodePorts.rpc Port for RPC
## @param taskmanager.service.nodePorts.internalMetrics Port for internal metrics query service
##
nodePorts:
data: ""
rpc: ""
internalMetrics: ""
## @param taskmanager.service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
##
extraPorts: []
## @param taskmanager.service.loadBalancerIP LoadBalancerIP if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param taskmanager.service.loadBalancerSourceRanges Service Load Balancer sources
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param taskmanager.service.clusterIP Service Cluster IP
## e.g.:
## clusterIP: None
##
clusterIP: ""
## @param taskmanager.service.externalTrafficPolicy Service external traffic policy
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param taskmanager.service.annotations Provide any additional annotations which may be required.
## This can be used to set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
## @param taskmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param taskmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Network Policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param taskmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: true
## @param taskmanager.networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param taskmanager.networkPolicy.allowExternalEgress Allow the pod to access any port range and all destinations.
##
allowExternalEgress: true
## @param taskmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param taskmanager.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
extraEgress: []
## @param taskmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param taskmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Apache Flink taskmanager serviceAccount parameters
##
serviceAccount:
## @param taskmanager.serviceAccount.create Enables ServiceAccount
##
create: true
## @param taskmanager.serviceAccount.name ServiceAccount name
##
name: ""
## @param taskmanager.serviceAccount.annotations Annotations to add to all deployed objects
##
annotations: {}
## @param taskmanager.serviceAccount.automountServiceAccountToken Automount API credentials for a service account.
##
automountServiceAccountToken: false
## Pod security context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param taskmanager.podSecurityContext.enabled Enabled Apache Flink pods' Security Context
## @param taskmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
## @param taskmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface
## @param taskmanager.podSecurityContext.supplementalGroups Set filesystem extra groups
## @param taskmanager.podSecurityContext.fsGroup Set Apache Flink pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroupChangePolicy: Always
sysctls: []
supplementalGroups: []
fsGroup: 1001
## Configure Container Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param taskmanager.containerSecurityContext.enabled Enabled Apache Flink containers' Security Context
## @param taskmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param taskmanager.containerSecurityContext.runAsUser Set Apache Flink container's Security Context runAsUser
## @param taskmanager.containerSecurityContext.runAsGroup Set Apache Flink container's Security Context runAsGroup
## @param taskmanager.containerSecurityContext.runAsNonRoot Force the container to be run as non root
## @param taskmanager.containerSecurityContext.privileged Set primary container's Security Context privileged
## @param taskmanager.containerSecurityContext.allowPrivilegeEscalation Allows privilege escalation
## @param taskmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
## @param taskmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped
## @param taskmanager.containerSecurityContext.seccompProfile.type Rules specifying actions to take based on the requested syscall
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
## @param taskmanager.podAnnotations Additional pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param taskmanager.podLabels Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param taskmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param taskmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param taskmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param taskmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
##
key: ""
## @param taskmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param taskmanager.priorityClassName Server priorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## @param taskmanager.affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param taskmanager.nodeSelector Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
nodeSelector: {}
## @param taskmanager.tolerations Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param taskmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
## The value is evaluated as a template
##
topologySpreadConstraints: []
## @param taskmanager.schedulerName Alternative scheduler
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param taskmanager.podManagementPolicy Pod management policy for the Apache Flink taskmanager statefulset
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
##
podManagementPolicy: Parallel
## @param taskmanager.updateStrategy.type Apache Flink taskmanager statefulset strategy type
## @param taskmanager.updateStrategy.rollingUpdate [object,nullable] Apache Flink taskmanager statefulset rolling update configuration parameters
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
rollingUpdate: null
## @param taskmanager.extraVolumes Optionally specify extra list of additional volumes for flink container
##
extraVolumes: []
## @param taskmanager.initContainers Add additional init containers to the flink pods
##
initContainers: []
## @param taskmanager.sidecars Add additional sidecar containers to the flink pods
##
sidecars: []
## @param taskmanager.pdb.create Enable/disable a Pod Disruption Budget creation
## @param taskmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
## @param taskmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `taskmanager.pdb.minAvailable` and `taskmanager.pdb.maxUnavailable` are empty.
##
pdb:
create: true
minAvailable: ""
maxUnavailable: ""


@@ -37,10 +37,11 @@ global:
 ## @param kubeVersion Override Kubernetes version
 ##
-kubeVersion: ""
+kubeVersion: "1.31.4"
 ## @param apiVersions Override Kubernetes API versions reported by .Capabilities
 ##
-apiVersions: []
+apiVersions:
+- "autoscaling.k8s.io/v1"
 ## @param nameOverride String to partially override common.names.fullname
 ##
 nameOverride: ""
@@ -998,7 +999,7 @@ controller:
 vpa:
 ## @param controller.autoscaling.vpa.enabled Enable VPA
 ##
-enabled: false
+enabled: true
 ## @param controller.autoscaling.vpa.annotations Annotations for VPA resource
 ##
 annotations: {}
@@ -1008,11 +1009,15 @@ controller:
 ## @param controller.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod
 ## cpu: 200m
 ## memory: 100Mi
-maxAllowed: {}
+maxAllowed:
+cpu: 1000m
+memory: 2048Mi
 ## @param controller.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod
 ## cpu: 200m
 ## memory: 100Mi
-minAllowed: {}
+minAllowed:
+cpu: 500m
+memory: 1024Mi
 updatePolicy:
 ## @param controller.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod
 ## Possible values are "Off", "Initial", "Recreate", and "Auto".
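
Overriding kubeVersion and apiVersions matters mainly for offline rendering (helm template or CI runs without cluster access), where Helm cannot discover API capabilities on its own. Charts commonly guard VPA manifests behind a capabilities check along the lines of the sketch below; this is an illustrative shape only, not the Bitnami kafka chart's actual template, although the object names match the standalone VPA manifest added later in this commit:

{{- if and .Values.controller.autoscaling.vpa.enabled (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: kafka-controller-vpa
spec:
  targetRef:
    apiVersion: apps/v1
    kind: StatefulSet
    name: kafka-controller
{{- end }}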


@@ -0,0 +1,24 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: kafka-controller-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 1000m
memory: 2048Mi
minAllowed:
cpu: 200m
memory: 512Mi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: kafka-controller
updatePolicy:
updateMode: "Auto"


@@ -1,13 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: pinot-dot-mathmast-dot-com
namespace: freeleaps-data-platform
spec:
commonName: pinot.mathmast.com
dnsNames:
- pinot.mathmast.com
issuerRef:
kind: ClusterIssuer
name: mathmast-dot-com
secretName: pinot-dot-mathmast-dot-com-tls


@@ -19,326 +19,110 @@
 # Default values for Pinot.
-namespaceOverride:
-namespaceAnnotations: {}
 image:
 repository: apachepinot/pinot
-# Pinot docker images are available at https://hub.docker.com/r/apachepinot/pinot/tags
-# - `latest` tag is always available and points to the nightly pinot master branch build
-# - `release-x.y.z` or `x.y.z` tags are available for each release, e.g. release-1.0.0, release-0.12.1, 1.0.0, 0.12.1, etc.
-#
-# Default JDK comes with Amazon Corretto 11, here are also images with different JDKs:
-# - Amazon Corretto 11, e.g. `latest-11`, `1.0.0-11`, `latest-11-amazoncorretto`, `1.0.0-11-amazoncorretto`
-# - Amazon Corretto 17, e.g. `latest-17-amazoncorretto`, `1.0.0-17-amazoncorretto`
-# - MS OpenJDK 11, e.g. `latest-11-ms-openjdk`, `1.0.0-11-ms-openjdk`
-# - MS OpenJDK 17, e.g. `latest-17-ms-openjdk`, `1.0.0-17-ms-openjdk`
-# - OpenJDK 21, e.g. `latest-21-openjdk`, `1.0.0-21-openjdk`
-tag: latest # 1.0.0, 0.12.1, latest
-pullPolicy: Always # Use IfNotPresent when you pinged a version of image tag
+tag: 1.3.0
+pullPolicy: IfNotPresent
 cluster:
-name: freeleaps
+name: freeleaps-pinot
-imagePullSecrets: []
-terminationGracePeriodSeconds: 30
-securityContext: {}
-# capabilities:
-# drop:
-# - ALL
-# readOnlyRootFilesystem: true
-# runAsNonRoot: true
-# runAsUser: 1000
-# default values of the probes i.e. liveness and readiness.
-# customization of values is present at the component level.
-probes:
-initialDelaySeconds: 60
-periodSeconds: 10
-failureThreshold: 10
-# should be 1 for liveness and startup probe, as per K8s doc.
-successThreshold: 1
-timeoutSeconds: 10
-serviceAccount:
-# Specifies whether a service account should be created
-create: true
-# Annotations to add to the service account
-annotations: {}
-# The name of the service account to use.
-# If not set and create is true, a name is generated using the fullname template
-name: ""
-additionalMatchLabels: {}
-pinotAuth:
-enabled: false
-controllerFactoryClass: org.apache.pinot.controller.api.access.BasicAuthAccessControlFactory
-brokerFactoryClass: org.apache.pinot.broker.broker.BasicAuthAccessControlFactory
-configs:
-# - access.control.principals=admin,user
-# - access.control.principals.admin.password=verysecret
-# - access.control.principals.user.password=secret
-# - access.control.principals.user.tables=baseballStats,otherstuff
-# - access.control.principals.user.permissions=READ
-# ------------------------------------------------------------------------------
-# Pinot Controller:
-# ------------------------------------------------------------------------------
 controller:
 name: controller
-# Controls whether controller.port is included in the configuration.
-# Set to false to exclude controller.port when using TLS-only mode or when
-# you want to specify the port in controller.access.protocols.https.port instead.
-configureControllerPort: true
+port: 9000
 replicaCount: 1
-podManagementPolicy: Parallel
-podSecurityContext: {}
-# fsGroup: 2000
-securityContext: {}
-startCommand: "StartController"
-probes:
-endpoint: "/health"
-livenessEnabled: false
-readinessEnabled: false
-startupEnabled: false
-liveness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-readiness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-startup:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
 persistence:
 enabled: true
 accessMode: ReadWriteOnce
-size: 1G
+size: 5G
 mountPath: /var/pinot/controller/data
-storageClass: ""
+storageClass: "azure-disk-std-lrs"
-extraVolumes: []
-extraVolumeMounts: []
 data:
 dir: /var/pinot/controller/data
 vip:
-enabled: false
 host: pinot-controller
 port: 9000
-jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-controller.log -Djute.maxbuffer=4000000"
+jvmOpts: "-Xms256M -Xmx1G"
-log4j2ConfFile: /opt/pinot/etc/conf/pinot-controller-log4j2.xml
+log4j2ConfFile: /opt/pinot/conf/pinot-controller-log4j2.xml
 pluginsDir: /opt/pinot/plugins
-pdb:
-enabled: false
-minAvailable: ""
-maxUnavailable: 50%
 service:
 annotations: {}
-clusterIP: "None"
+clusterIP: ""
 externalIPs: []
 loadBalancerIP: ""
 loadBalancerSourceRanges: []
 type: ClusterIP
 port: 9000
 nodePort: ""
-protocol: TCP
-name: controller
-extraPorts: []
-# - containerPort: 1234
-# protocol: PROTOCOL
-# name: extra-port
 external:
-enabled: true
+enabled: false
 type: LoadBalancer
 port: 9000
-annotations: {}
-ingress:
-v1beta1:
-enabled: false
-annotations: { }
-tls: { }
-path: /
-hosts: [ ]
-# port: 9433
-v1:
-enabled: false
-ingressClassName: ""
-annotations: {}
-tls: []
-path: /
-hosts: []
-# port: 9433
 resources:
 requests:
-memory: "1.25Gi"
+cpu: 200m
+memory: 256Mi
+limits:
+cpu: 500m
+memory: 1Gi
 nodeSelector: {}
 tolerations: []
-initContainers: []
 affinity: {}
 podAnnotations: {}
-# set enabled as true, to automatically roll controller stateful set for configmap change
-automaticReload:
-enabled: false
 updateStrategy:
 type: RollingUpdate
-# Use envFrom to define all of the ConfigMap or Secret data as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables
-envFrom: []
-# - configMapRef:
-# name: special-config
-# - secretRef:
-# name: test-secret
-# Use extraEnv to add individual key value pairs as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
-extraEnv:
-- name: LOG4J_CONSOLE_LEVEL
-value: info
-# - name: PINOT_CUSTOM_ENV
-# value: custom-value
-# Extra configs will be appended to pinot-controller.conf file
-extra:
-configs: |-
-pinot.set.instance.id.to.hostname=true
-controller.task.scheduler.enabled=true
-# ------------------------------------------------------------------------------
-# Pinot Broker:
-# ------------------------------------------------------------------------------
 broker:
 name: broker
-# Controls whether pinot.broker.client.queryPort is included in the configuration.
-# Set to false to exclude pinot.broker.client.queryPort when using TLS-only mode or when
-# you want to specify the port in pinot.broker.access.protocols.https.port instead.
-configureBrokerPort: true
+port: 8099
 replicaCount: 1
-podManagementPolicy: Parallel
-podSecurityContext: {}
-# fsGroup: 2000
-securityContext: {}
-startCommand: "StartBroker"
-jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-broker.log -Djute.maxbuffer=4000000"
+jvmOpts: "-Xms256M -Xmx1G"
-log4j2ConfFile: /opt/pinot/etc/conf/pinot-broker-log4j2.xml
+log4j2ConfFile: /opt/pinot/conf/pinot-broker-log4j2.xml
 pluginsDir: /opt/pinot/plugins
 routingTable:
 builderClass: random
-probes:
-endpoint: "/health"
-livenessEnabled: true
-readinessEnabled: true
-startupEnabled: false
-liveness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-readiness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-startup:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-persistence:
-extraVolumes: []
-extraVolumeMounts: []
-pdb:
-enabled: false
-minAvailable: ""
-maxUnavailable: 50%
 service:
 annotations: {}
-clusterIP: "None"
+clusterIP: ""
 externalIPs: []
 loadBalancerIP: ""
 loadBalancerSourceRanges: []
 type: ClusterIP
-protocol: TCP
 port: 8099
-name: broker
 nodePort: ""
-extraPorts: []
-# - containerPort: 1234
-# protocol: PROTOCOL
-# name: extra-port
 external:
-enabled: true
+enabled: false
 type: LoadBalancer
 port: 8099
-# For example, in private GKE cluster, you might add cloud.google.com/load-balancer-type: Internal
-annotations: {}
-ingress:
-v1beta1:
-enabled: false
-annotations: {}
-tls: {}
-path: /
-hosts: []
-# port: 8443
-v1:
-enabled: false
-ingressClassName: ""
-annotations: {}
-tls: []
-path: /
-hosts: []
-# port: 8443
 resources:
 requests:
-memory: "1.25Gi"
+cpu: 200m
+memory: 256Mi
+limits:
+cpu: 500m
+memory: 1Gi
 nodeSelector: {}
@@ -346,85 +130,19 @@ broker:
 tolerations: []
-initContainers: []
 podAnnotations: {}
-# set enabled as true, to automatically roll broker stateful set for configmap change
-automaticReload:
-enabled: false
 updateStrategy:
 type: RollingUpdate
-# Use envFrom to define all of the ConfigMap or Secret data as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables
-envFrom: []
-# - configMapRef:
-# name: special-config
-# - secretRef:
-# name: test-secret
-# Use extraEnv to add individual key value pairs as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
-extraEnv:
-- name: LOG4J_CONSOLE_LEVEL
-value: info
-# - name: PINOT_CUSTOM_ENV
-# value: custom-value
-# Extra configs will be appended to pinot-broker.conf file
-extra:
-configs: |-
-pinot.set.instance.id.to.hostname=true
-pinot.query.server.port=7321
-pinot.query.runner.port=7732
-# ------------------------------------------------------------------------------
-# Pinot Server:
-# ------------------------------------------------------------------------------
 server:
 name: server
-# Controls whether pinot.server.netty.port is included in the configuration.
-# Set to false to exclude pinot.server.netty.port when using TLS-only mode or when
-# you want to specify the port in pinot.server.nettytls.port instead.
-configureServerPort: true
+ports:
+netty: 8098
+admin: 8097
 replicaCount: 1
-podManagementPolicy: Parallel
-podSecurityContext: {}
-# fsGroup: 2000
-securityContext: {}
-startCommand: "StartServer"
-probes:
-endpoint: "/health"
-livenessEnabled: false
-readinessEnabled: false
-startupEnabled: false
-liveness:
-endpoint: "/health/liveness"
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-readiness:
-endpoint: "/health/readiness"
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-startup:
-endpoint: "/health/liveness"
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
 dataDir: /var/pinot/server/data/index
 segmentTarDir: /var/pinot/server/data/segment
@@ -432,23 +150,15 @@ server:
 persistence:
 enabled: true
 accessMode: ReadWriteOnce
-size: 4G
+size: 5G
 mountPath: /var/pinot/server/data
-storageClass: ""
+storageClass: "azure-disk-std-lrs"
-#storageClass: "ssd"
-extraVolumes: []
-extraVolumeMounts: []
-jvmOpts: "-Xms512M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-server.log -Djute.maxbuffer=4000000"
+jvmOpts: "-Xms512M -Xmx1G"
-log4j2ConfFile: /opt/pinot/etc/conf/pinot-server-log4j2.xml
+log4j2ConfFile: /opt/pinot/conf/pinot-server-log4j2.xml
 pluginsDir: /opt/pinot/plugins
-pdb:
-enabled: false
-minAvailable: ""
-maxUnavailable: 1
 service:
 annotations: {}
 clusterIP: ""
@@ -456,21 +166,16 @@ server:
 loadBalancerIP: ""
 loadBalancerSourceRanges: []
 type: ClusterIP
-nettyPort: 8098
-nettyPortName: netty
-adminPort: 8097
-adminExposePort: 80
-adminPortName: admin
+port: 8098
 nodePort: ""
-protocol: TCP
-extraPorts: []
-# - containerPort: 1234
-# protocol: PROTOCOL
-# name: extra-port
 resources:
 requests:
-memory: "1.25Gi"
+cpu: 200m
+memory: 512Mi
+limits:
+cpu: 500m
+memory: 1Gi
 nodeSelector: {}
@@ -478,320 +183,46 @@ server:
 tolerations: []
-initContainers: []
 podAnnotations: {}
-# set enabled as true, to automatically roll server stateful set for configmap change
-automaticReload:
-enabled: false
 updateStrategy:
 type: RollingUpdate
-# Use envFrom to define all of the ConfigMap or Secret data as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables
-envFrom: []
-# - configMapRef:
-# name: special-config
-# - secretRef:
-# name: test-secret
-# Use extraEnv to add individual key value pairs as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
-extraEnv:
-- name: LOG4J_CONSOLE_LEVEL
-value: info
-# - name: PINOT_CUSTOM_ENV
-# value: custom-value
-# Extra configs will be appended to pinot-server.conf file
-extra:
-configs: |-
-pinot.set.instance.id.to.hostname=true
-pinot.server.instance.realtime.alloc.offheap=true
-pinot.query.server.port=7321
-pinot.query.runner.port=7732
-# ------------------------------------------------------------------------------
-# Pinot Minion:
-# ------------------------------------------------------------------------------
-minion:
-enabled: false
-name: minion
-# Controls whether pinot.minion.port is included in the configuration.
-# Set to false to exclude pinot.minion.port when using TLS-only mode
-# or when you're configuring ports through another mechanism.
-configureMinionPort: true
-replicaCount: 0
-podManagementPolicy: Parallel
-podSecurityContext: {}
-# fsGroup: 2000
-securityContext: {}
-startCommand: "StartMinion"
-probes:
-endpoint: "/health"
-livenessEnabled: true
-readinessEnabled: true
-startupEnabled: false
-liveness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-readiness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-startup:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-dataDir: /var/pinot/minion/data
-jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-minion.log -Djute.maxbuffer=4000000"
-log4j2ConfFile: /opt/pinot/etc/conf/pinot-minion-log4j2.xml
-pluginsDir: /opt/pinot/plugins
-persistence:
-enabled: true
-accessMode: ReadWriteOnce
-size: 4G
-mountPath: /var/pinot/minion/data
-storageClass: ""
-#storageClass: "ssd"
-extraVolumes: []
-extraVolumeMounts: []
-service:
-annotations: {}
-clusterIP: ""
-externalIPs: []
-loadBalancerIP: ""
-loadBalancerSourceRanges: []
-type: ClusterIP
-port: 9514
-nodePort: ""
-protocol: TCP
-name: minion
-extraPorts: []
-# - containerPort: 1234
-# protocol: PROTOCOL
-# name: extra-port
-resources:
-requests:
-memory: "1.25Gi"
-nodeSelector: {}
-affinity: {}
-tolerations: []
-initContainers: []
-podAnnotations: {}
-automaticReload:
-enabled: false
-updateStrategy:
-type: RollingUpdate
-# Use envFrom to define all of the ConfigMap or Secret data as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables
-envFrom: []
-# - configMapRef:
-# name: special-config
-# - secretRef:
-# name: test-secret
-# Use extraEnv to add individual key value pairs as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
-extraEnv:
-- name: LOG4J_CONSOLE_LEVEL
-value: info
-# - name: PINOT_CUSTOM_ENV
-# value: custom-value
-# Extra configs will be appended to pinot-minion.conf file
-extra:
-configs: |-
-pinot.set.instance.id.to.hostname=true
-# ------------------------------------------------------------------------------
-# Pinot Minion Stateless:
-# ------------------------------------------------------------------------------
-minionStateless:
-enabled: true
-name: minion-stateless
-# Controls whether pinot.minion.port is included in the configuration.
-# Set to false to exclude pinot.minion.port when using TLS-only mode
-# or when you're configuring ports through another mechanism.
-configureMinionStatelessPort: true
-replicaCount: 1
-podSecurityContext: {}
-# fsGroup: 2000
-securityContext: {}
-startCommand: "StartMinion"
-probes:
-endpoint: "/health"
-livenessEnabled: true
-readinessEnabled: true
-startupEnabled: true
-liveness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-readiness:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-startup:
-initialDelaySeconds: 60
-failureThreshold: 10
-timeoutSeconds: 10
-successThreshold: 1
-periodSeconds: 10
-dataDir: /var/pinot/minion/data
-jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-minion.log -Djute.maxbuffer=4000000"
-log4j2ConfFile: /opt/pinot/etc/conf/pinot-minion-log4j2.xml
-pluginsDir: /opt/pinot/plugins
-persistence:
-enabled: false
-pvcName: minion-data-vol
-accessMode: ReadWriteOnce
-size: 4G
-mountPath: /var/pinot/minion/data
-storageClass: ""
-#storageClass: "ssd"
-extraVolumes: []
-extraVolumeMounts: []
-service:
-port: 9514
-protocol: TCP
-name: minion
-extraPorts: []
-# - containerPort: 1234
-# protocol: PROTOCOL
-# name: extra-port
-resources:
-requests:
-memory: "1.25Gi"
-nodeSelector: {}
-affinity: {}
-tolerations: []
-initContainers: []
-podAnnotations: {}
-# Use envFrom to define all of the ConfigMap or Secret data as container environment variables.
-# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables
# ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables
envFrom: []
# - configMapRef:
# name: special-config
# - secretRef:
# name: test-secret
# Use extraEnv to add individual key value pairs as container environment variables.
# ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
extraEnv:
- name: LOG4J_CONSOLE_LEVEL
value: info
# - name: PINOT_CUSTOM_ENV
# value: custom-value
# Extra configs will be appended to pinot-minion.conf file
extra:
configs: |-
pinot.set.instance.id.to.hostname=true
# ------------------------------------------------------------------------------
# Zookeeper:
# NOTE: IN PRODUCTION USE CASES, IT's BEST TO USE ZOOKEEPER K8S OPERATOR
# ref: https://github.com/pravega/zookeeper-operator#install-the-operator
# ------------------------------------------------------------------------------
zookeeper:
  ## If true, install the Zookeeper chart alongside Pinot
-  ## ref: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper
+  ## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper
-  enabled: true
+  enabled: false
## If the Zookeeper Chart is disabled a URL override is required to connect
urlOverride: "my-zookeeper:2181/my-pinot"
## Zookeeper port
port: 2181
  ## Configure Zookeeper resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
-  resources:
-    requests:
-      memory: "1.25Gi"
+  resources: {}
  ## Replicas
  replicaCount: 1
-  ## Ongoing data directory cleanup configuration
-  autopurge:
-    ## The time interval (in hours) for which the purge task has to be triggered
-    ## Set to a positive integer to enable the auto purging
-    purgeInterval: 1
-    ## The most recent snapshots amount (and corresponding transaction logs) to retain
-    snapRetainCount: 5
-  ## Size (in MB) for the Java Heap options (Xmx and Xms)
-  ## This env var is ignored if Xmx an Xms are configured via `zookeeper.jvmFlags`
-  heapSize: "1024"
-  ## Extra JVM Flags for Zookeeper
-  jvmFlags: "-Djute.maxbuffer=4000000"
+  ## Environmental variables to set in Zookeeper
+  env:
+    ## The JVM heap size to allocate to Zookeeper
+    ZK_HEAP_SIZE: "256M"
  persistence:
    enabled: true
storageClass: ""
#storageClass: "ssd"
    ## The amount of PV storage allocated to each Zookeeper pod in the statefulset
-    size: "8Gi"
+    # size: "2Gi"
  ## Specify a Zookeeper imagePullPolicy
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  image:
    PullPolicy: "IfNotPresent"
## If the Zookeeper Chart is disabled a URL and port are required to connect
url: "zookeeper-headless.freeleaps-data-platform.svc.freeleaps.cluster"
port: 2181
  ## Pod scheduling preferences (by default keep pods within a release on separate nodes).
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## By default we don't set affinity:

View File

@@ -0,0 +1,87 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: pinot-controller-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 500m
memory: 1Gi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: pinot-controller
updatePolicy:
updateMode: "Auto"
---
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: pinot-broker-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 500m
memory: 1.5Gi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: pinot-broker
updatePolicy:
updateMode: "Auto"
---
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: pinot-server-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 500m
memory: 1Gi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: pinot-server
updatePolicy:
updateMode: "Auto"
---
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: pinot-minion-stateless-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 500m
memory: 1Gi
targetRef:
apiVersion: apps/v1
kind: Deployment
name: pinot-minion-stateless
updatePolicy:
updateMode: "Auto"

View File

@@ -0,0 +1,108 @@
# Default values for operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
rbac:
    # If set to true, the clusterrole, clusterrolebinding, and serviceaccount resources will be created
    # for the operator. If changed to false later, these resources will be deleted.
    # Note: by default the operator watches all namespaces, so it needs a clusterrole and clusterrolebinding to access resources.
    # If .Values.starrocksOperator.watchNamespace is set, a role and rolebinding will be created for the specified namespace.
create: true
serviceAccount:
name: "starrocks"
# Optional annotations to add to serviceaccount manifest
annotations: {}
# Optional labels to add to serviceaccount manifest
labels: {}
# TimeZone is used to set the environment variable TZ for the pods, with Asia/Shanghai as the default.
timeZone: Asia/Shanghai
# Set nameOverride so that this chart creates resources with the same names as the parent chart.
# In version v1.7.1 and earlier there was only one chart, kube-starrocks, and the chart name was used as
# the prefix of some resources created by the chart.
# In version v1.8.0 the kube-starrocks chart was split into two charts; to keep backward compatibility,
# nameOverride is used to set the prefix of the resources created by the operator chart.
nameOverride: "kube-starrocks"
starrocksOperator:
  # If enabled, the operator-related resources will be created, including the operator deployment,
  # service account, clusterrole, and clusterrolebinding.
enabled: true
# annotations for starrocks operator.
annotations: {}
namespaceOverride: ""
image:
    # Image specified as "repository" and "tag".
repository: starrocks/operator
tag: v1.10.2
imagePullPolicy: Always
replicaCount: 1
resources:
limits:
cpu: 500m
memory: 800Mi
requests:
cpu: 500m
memory: 400Mi
  # By default, the operator only sets runAsNonRoot to true, allowPrivilegeEscalation to false, and readOnlyRootFilesystem to true.
  # You can customize the securityContext for the operator pod, e.g. drop capabilities, set a seccompProfile, etc.
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
#capabilities:
# drop:
# - ALL
#seccompProfile:
# type: "RuntimeDefault"
# imagePullSecrets allows you to use secrets to pull images for pods.
imagePullSecrets: []
# - name: "image-pull-secret"
  # If specified, the pod's nodeSelector: a map of node selector labels that must match when scheduling pods onto nodes.
# Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector: {}
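  # For example (illustrative only; use labels that actually exist on your nodes):
  # nodeSelector:
  #   kubernetes.io/os: linux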
# affinity for operator pod scheduling.
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchFields:
# - key: metadata.name
# operator: In
# values:
# - target-host-name
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
  # If specified, it restricts the operator to watching resources in the given namespace. Note that:
  # 1. You must deploy your StarRocks cluster to the specified namespace.
  # 2. You cannot assign multiple namespaces to the watchNamespace field.
  # In most cases you should not set this value. If your Kubernetes cluster manages very many nodes and
  # an operator watching all namespaces uses too much memory, you can set this value.
  # Defaults to all namespaces.
watchNamespace: "freeleaps-data-platform"
  # Additional operator container environment variables.
  # Specify these manually, as you would in a raw deployment manifest.
  # Ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
  # The operator supports the following environment variables:
  # KUBE_STARROCKS_UNSUPPORTED_ENVS: "XXX,YYY" # Environment variables that will not be passed to the starrocks container.
env: []
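  # For example, to keep proxy-related variables from being passed to the starrocks container
  # (illustrative values only):
  # env:
  #   - name: KUBE_STARROCKS_UNSUPPORTED_ENVS
  #     value: "HTTP_PROXY,HTTPS_PROXY"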
  # Log-related parameters for the operator.
log:
- --zap-time-encoding=iso8601
- --zap-encoder=console
    # To enable debug logging, uncomment this option:
# - --zap-log-level 4
  # The operator needs to specify the FQDN in nginx.conf when it sets up the fe-proxy service.
  # By default, the operator uses cluster.local as the dnsDomainSuffix.
  # If your Kubernetes cluster is set up with a different dnsDomainSuffix, you need to set this value.
dnsDomainSuffix: ""
  # By default, the volume names of the secrets and configmaps created by the operator for the FE/BE/CN pods have a hash suffix.
  # If you plan to use a sidecar or init container to mount the same volume, the hashed name is hard to determine.
  # In that situation, you can set this value to false.
volumeNameWithHash: true

View File

@@ -0,0 +1,43 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: starrocks-fe-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 500m
memory: 2Gi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: freeleaps-starrocks-fe
updatePolicy:
updateMode: "Auto"
---
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: starrocks-be-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 500m
memory: 2Gi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: freeleaps-starrocks-be
updatePolicy:
updateMode: "Auto"

File diff suppressed because it is too large

View File

@@ -0,0 +1,21 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: zookeeper-vpa
namespace: freeleaps-data-platform
spec:
resourcePolicy:
containerPolicies:
- containerName: '*'
controlledResources:
- cpu
- memory
maxAllowed:
cpu: 200m
memory: 512Mi
targetRef:
apiVersion: apps/v1
kind: StatefulSet
name: zookeeper
updatePolicy:
updateMode: "Auto"

View File

@@ -34,7 +34,7 @@ freeleaps:
        port: 8001
        initialDelaySeconds: 5
        periodSeconds: 30
-       timeoutSeconds: 3
+       timeoutSeconds: 60
        successThreshold: 1
        failureThreshold: 3
      liveness:
@@ -44,7 +44,7 @@ freeleaps:
        port: 8001
        initialDelaySeconds: 5
        periodSeconds: 15
-       timeoutSeconds: 3
+       timeoutSeconds: 60
        successThreshold: 1
        failureThreshold: 3
      terminationGracePeriodSeconds: 30