fix: update Jenkins token in gitea webhook configuration

Signed-off-by: zhenyus <zhenyus@mathmast.com>
zhenyus 2025-07-24 16:51:35 +08:00
parent 6ac6b41168
commit c2d2fa6345
26 changed files with 2118 additions and 1 deletion

View File

@@ -0,0 +1,171 @@
<mxfile host="65bd71144e">
<diagram id="grFnYG0Qw2MXFLnat0tl" name="Page-1">
<mxGraphModel dx="2065" dy="1924" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
<root>
<mxCell id="0"/>
<mxCell id="1" parent="0"/>
<mxCell id="29" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="6" target="28">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="45" value="ref" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="29">
<mxGeometry x="0.0947" y="-2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="31" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="6" target="30">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="51" value="ref" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="31">
<mxGeometry x="-0.0413" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="66" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="6" target="63">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="67" value="auhtorized by foo" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="66">
<mxGeometry x="0.0635" y="-2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="6" value="Product&lt;div&gt;(Team &amp;amp; Tenancy)&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="270" y="70" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="22" style="edgeStyle=none;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="12" target="18">
<mxGeometry relative="1" as="geometry">
<mxPoint x="510" y="120" as="sourcePoint"/>
</mxGeometry>
</mxCell>
<mxCell id="23" value="manage" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="22">
<mxGeometry x="-0.048" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="47" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="12" target="6">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="48" value="owner ref" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="47">
<mxGeometry x="0.2" y="1" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="12" value="&lt;div&gt;foo&lt;/div&gt;(Freeleaps ACC)" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="530" y="80" width="120" height="40" as="geometry"/>
</mxCell>
<mxCell id="19" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="18" target="6">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="50" value="ref" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="19">
<mxGeometry x="-0.059" y="1" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="18" value="Gitea Org" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="270" y="180" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="21" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="20" target="18">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="44" value="belongs to" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="21">
<mxGeometry x="-0.0098" y="2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="20" value="Gitea Repo" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="270" y="300" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="28" value="Project&lt;div&gt;(Requirements)&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="50" y="180" width="140" height="60" as="geometry"/>
</mxCell>
<mxCell id="36" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="30" target="32">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="52" value="manage" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="36">
<mxGeometry x="-0.006" y="1" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="37" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="30" target="33">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="53" value="manage" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="37">
<mxGeometry x="-0.107" y="-2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="38" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="30" target="34">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="54" value="manage" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="38">
<mxGeometry x="0.062" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="39" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="30" target="35">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="55" value="manage" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="39">
<mxGeometry x="-0.0427" y="2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="30" value="DevOpsProject" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="270" y="-50" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="32" value="Argo Settings" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry y="-170" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="33" value="Jenkins Settings" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="180" y="-170" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="34" value="Container Registry" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="360" y="-170" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="35" value=".. Infra Objects" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="540" y="-170" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="58" style="edgeStyle=none;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="56" target="28">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="59" value="ref" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="58">
<mxGeometry x="-0.1724" y="-2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="56" value="Quote&lt;div&gt;(Plan &amp;amp; Quotes etc)&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="-160" y="180" width="140" height="60" as="geometry"/>
</mxCell>
<mxCell id="61" style="edgeStyle=none;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="60" target="56">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="64" style="edgeStyle=none;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="60" target="63">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="68" value="ref" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="64">
<mxGeometry x="-0.0944" y="-4" relative="1" as="geometry">
<mxPoint x="-4" as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="60" value="&lt;div&gt;bar&lt;/div&gt;(Freeleaps ACC)" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="-350" y="190" width="120" height="40" as="geometry"/>
</mxCell>
<mxCell id="69" style="edgeStyle=none;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="63" target="20">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="-440" y="100"/>
<mxPoint x="-440" y="330"/>
</Array>
</mxGeometry>
</mxCell>
<mxCell id="70" value="read &amp;amp; write permissions" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="69">
<mxGeometry x="0.2451" y="2" relative="1" as="geometry">
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
<mxCell id="63" value="&lt;div&gt;Project Contributor&lt;/div&gt;&lt;div&gt;(role)&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
<mxGeometry x="-350" y="80" width="120" height="40" as="geometry"/>
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

Binary file not shown (new image, 58 KiB).

View File

@@ -16,7 +16,7 @@ data:
jenkins:
url: "http://jenkins.freeleaps-devops-system.svc.freeleaps.cluster:8080"
username: "admin"
token: "115127e693f1bc6b7194f58ff6d6283bd0"
token: "11c25b2a96454a14a49b748db47dd587a9"
timeout: 30
gitea:
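
The rotated token can be sanity-checked against the Jenkins instance referenced above before rolling the change out; the host, user, and token in this sketch are simply the values from the configuration, and the check itself is only illustrative:

```bash
# Illustrative check: confirm the new API token authenticates against Jenkins.
curl -sf -u 'admin:11c25b2a96454a14a49b748db47dd587a9' \
  'http://jenkins.freeleaps-devops-system.svc.freeleaps.cluster:8080/api/json' > /dev/null \
  && echo 'token accepted' || echo 'token rejected'
```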

View File

@@ -0,0 +1,122 @@
apiVersion: v1
data:
jcasc-default-config.yaml: |-
jenkins:
authorizationStrategy:
loggedInUsersCanDoAnything:
allowAnonymousRead: false
securityRealm:
local:
allowsSignup: false
enableCaptcha: false
users:
- id: "admin"
name: "Jenkins Admin"
password: "r6Y@QTb*7BQN@hDGsN"
disableRememberMe: false
mode: NORMAL
numExecutors: 5
labelString: ""
projectNamingStrategy: "standard"
markupFormatter:
plainText
clouds:
- kubernetes:
containerCapStr: "10"
defaultsProviderTemplate: ""
connectTimeout: "5"
readTimeout: "15"
jenkinsUrl: "http://jenkins.freeleaps-devops-system.svc.freeleaps.cluster:8080"
name: "kubernetes"
namespace: "freeleaps-devops-system"
podLabels:
- key: "jenkins/jenkins-agent"
value: "true"
serverUrl: "https://kubernetes.default"
skipTlsVerify: false
templates:
- containers:
- args: "^{computer.jnlpmac} ^{computer.name}"
envVars:
- envVar:
key: "JENKINS_URL"
value: "http://jenkins.freeleaps-devops-system.svc.freeleaps.cluster:8080/"
image: "jenkins/inbound-agent:3273.v4cfe589b_fd83-1"
name: "jnlp"
resourceLimitCpu: 512m
resourceLimitMemory: 512Mi
resourceRequestCpu: 50m
resourceRequestMemory: 64Mi
workingDir: "/home/jenkins/agent"
id: 6a_919e0c82_7f68_0d_e4_e_51614_2a_fc
label: "jenkins-agent"
name: "default"
namespace: "freeleaps-devops-system"
nodeUsageMode: "NORMAL"
podRetention: never
serviceAccount: "default"
slaveConnectTimeout: 100
slaveConnectTimeoutStr: "100"
yamlMergeStrategy: override
crumbIssuer:
standard:
excludeClientIPFromCrumb: true
globalNodeProperties: []
credentials:
system:
domainCredentials:
- credentials:
- azure:
azureEnvironmentName: "Azure"
clientId: "7f115646-6a0a-445f-9976-b3832dd77a43"
clientSecret: "Cia8Q~T-r-r5MftqCAJDOCmFckeMOKuo6xPvRcZT"
description: "Freeleaps Jenkins System Principal"
id: "freeleaps-jenkins-system-azure-principal"
subscriptionId: "0a280068-dec4-4bf0-9f04-65b64f412b50"
tenant: "cf151ee8-5c2c-4fe7-a1c4-809ba43c9f24"
scope: "SYSTEM"
- azureStorageAccount:
blobEndpointURL: "https://freeleaps.blob.core.windows.net/"
id: "freeleaps-azure-storage-account"
scope: "GLOBAL"
storageAccountName: "freeleaps"
storageKey: "ma7vlPvKrJkEU/oDCEF3CbCIZD31INoDykmxcChbzhGnh1laTjlFLTrUatnhuwoy/Csx9/UpkEce+AStZoO+/A=="
security:
apiToken:
creationOfLegacyTokenEnabled: false
tokenGenerationOnCreationEnabled: false
usageStatisticsEnabled: true
scriptApproval:
forceSandbox: true
unclassified:
location:
adminAddress: "address not configured yet <nobody@nowhere>"
url: "https://jenkins.mathmast.com/"
azureKeyVault:
keyVaultUrl: "https://freeleaps-secrets.vault.azure.net"
credentialID: "freeleaps-jenkins-system-azure-principal"
globalItemStorage:
storage:
azure:
containerName: "freeleaps-devops-caches"
credentialsId: "freeleaps-azure-storage-account"
globalLibraries:
libraries:
- defaultVersion: "master"
name: "first-class-pipeline"
retriever:
legacySCM:
clone: true
libraryPath: "first-class-pipeline/"
scm:
scmGit:
branches:
- name: "master"
buildChooser: "default"
userRemoteConfigs:
- credentialsId: "freeleaps-ops-git-credentials"
url: "https://gitea.freeleaps.mathmast.com/freeleaps/freeleaps-ops.git"
kind: ConfigMap
metadata:
name: jenkins-jcasc-config
namespace: freeleaps-devops-system

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,9 @@
annotations:
artifacthub.io/license: Apache-2.0
apiVersion: v2
appVersion: 2.23.4-rc.0
description: A chart for deploying the server-side components of Telepresence
icon: https://raw.githubusercontent.com/telepresenceio/telepresence.io/master/src/assets/images/telepresence-edgy.svg
name: telepresence-oss
type: application
version: 2.23.4-rc.0

View File

@@ -0,0 +1,176 @@
# Telepresence
[Telepresence](https://telepresence.io/) is a tool
that allows for local development of microservices running in a remote
Kubernetes cluster.
This chart manages the server-side components of Telepresence so that an
operations team can give limited access to the cluster for developers to work on
their services.
## Install
The telepresence binary embeds the helm chart, so the easiest way to install is:
```sh
$ telepresence helm install [--set x=y | --values <values file>]
```
## Configuration
The following table lists the configurable parameters of the Telepresence chart and their default values.
| Parameter | Description | Default |
|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------|
| affinity | Define the `Node` Affinity and Anti-Affinity for the Traffic Manager. | `{}` |
| agent.appProtocolStrategy | The strategy to use when determining the application protocol to use for intercepts | `http2Probe` |
| agent.image.name | The name of the injected agent image | `""` |
| agent.image.pullPolicy | Pull policy in the webhook for the traffic agent image | `IfNotPresent` |
| agent.image.tag | The tag for the injected agent image | `""` (Defined in `appVersion` Chart.yaml) |
| agent.image.registry | The registry for the injected agent image | `ghcr.io/telepresenceio` |
| agent.initResources | The resources for the injected init container | |
| agent.logLevel | The logging level for the traffic-agent | defaults to logLevel |
| agent.mountPolicies | The policies for the agents. Key is either volume name or path prefix starting with '/' | `/tmp`: Local |
| agent.resources | The resources for the injected agent container | |
| agent.securityContext | The security context to use for the injected agent container | defaults to the securityContext of the first container of the app |
| agent.initSecurityContext                             | The security context to use for the injected init container                                                                                                            | `{}` |
| agent.initContainer.enabled                           | Whether to enable/disable injection of the initContainer                                                                                                               | `true` |
| agentInjector.certificate.accessMethod | Method used by the agent injector to access the certificate (watch or mount). | `watch` |
| agentInjector.certificate.certmanager.commonName | The common name of the generated Certmanager certificate. | `agent-injector` |
| agentInjector.certificate.certmanager.duration | The certificate validity duration. (optional value) | `2160h0m0s` |
| agentInjector.certificate.certmanager.issuerRef.kind | The Issuer kind to use to generate the self signed certificate. (Issuer of ClusterIssuer) | `Issuer` |
| agentInjector.certificate.certmanager.issuerRef.name | The Issuer name to use to generate the self signed certificate. | `telepresence` |
| agentInjector.certificate.method | Method used when generating the certificate used for mutating webhook (helm, supplied, or certmanager). | `helm` |
| agentInjector.certificate.regenerate | Whether the certificate used for the mutating webhook should be regenerated. | `false` |
| agentInjector.enabled | Enable/Disable the agent-injector and its webhook. | `true` |
| agentInjector.name | Name to use with objects associated with the agent-injector. | `agent-injector` |
| agentInjector.injectPolicy | Determines when an agent is injected, possible values are `OnDemand` and `WhenEnabled` | `OnDemand` |
| agentInjector.secret.name                             | The name of the secret holding the TLS certificate that the agent-injector webhook presents to the Kubernetes API.                                                     | `mutator-webhook-tls` |
| agentInjector.service.type | Type of service for the agent-injector. | `ClusterIP` |
| agentInjector.webhook.admissionReviewVersions: | List of supported admissionReviewVersions. | `["v1"]` |
| agentInjector.webhook.failurePolicy: | Action to take on unexpected failure or timeout of webhook. | `Ignore` |
| agentInjector.webhook.name | The name of the agent-injector webhook | `agent-injector-webhook` |
| ~~agentInjector.webhook.namespaceSelector~~: | The namespaceSelector used by the agent-injector webhook when the traffic-manager is not namespaced. Deprecated, use top level `namespaces` or `namespaceSelector` | {} |
| agentInjector.webhook.port: | Port for the service that provides the admission webhook | `443` |
| agentInjector.webhook.reinvocationPolicy: | Specify if the webhook may be called again after the initial webhook call. Possible values are `Never` and `IfNeeded`. | `IfNeeded` |
| agentInjector.webhook.servicePath: | Path to the service that provides the admission webhook | `/traffic-agent` |
| agentInjector.webhook.sideEffects: | Any side effects the admission webhook makes outside of AdmissionReview. | `None` |
| agentInjector.webhook.timeoutSeconds: | Timeout of the admission webhook | `5` |
| apiPort | The port used by the Traffic Manager gRPC API | 8081 |
| client.connectionTTL                                  | Deprecated: use grpc.connectionTTL instead                                                                                                                              | `24h` |
| client.dns.excludeSuffixes | Suffixes for which the client DNS resolver will always fail (or fallback in case of the overriding resolver) | `[".com", ".io", ".net", ".org", ".ru"]` |
| client.dns.includeSuffixes | Suffixes for which the client DNS resolver will always attempt to do a lookup. Includes have higher priority than excludes. | `[]` |
| client.routing.allowConflictingSubnets | Allow the specified subnets to be routed even if they conflict with other routes on the local machine. | `[]` |
| client.routing.alsoProxySubnets | The virtual network interface of connected clients will also proxy these subnets | `[]` |
| client.routing.neverProxySubnets                      | The virtual network interface of connected clients will never proxy these subnets                                                                                      | `[]` |
| clientRbac.create | Create RBAC resources for non-admin users with this release. | `false` |
| ~~clientRbac.namespaced~~ | Restrict the users to specific namespaces. Deprecated and no longer used. | `false` |
| clientRbac.namespaces | The namespaces to give users access to. | Traffic Manager's namespaces (unless dynamic) |
| clientRbac.subjects | The user accounts to tie the created roles to. | `{}` |
| grpc.connectionTTL | The time that the traffic-manager will retain a client connection without any sign of life from the workstation | `24h` |
| grpc.maxReceiveSize                                   | Max size of a gRPC message                                                                                                                                              | `4Mi` |
| hooks.busybox.image | The name of the image to use for busybox. | `busybox` |
| hooks.busybox.imagePullSecrets | The `Secret` storing any credentials needed to access the image in a private registry. | `[]` |
| hooks.busybox.registry | The registry to download the image from. | `docker.io` |
| hooks.busybox.tag | Override the version of busybox to be installed. | `latest` |
| hooks.curl.registry | The repository to download the image from. | `docker.io` |
| hooks.curl.image | The name of the image to use for curl. | `curlimages/curl` |
| hooks.curl.imagePullSecrets | The `Secret` storing any credentials needed to access the image in a private registry. | `[]` |
| hooks.curl.pullPolicy | Pull policy used when pulling the curl image. | `IfNotPresent` |
| hooks.curl.tag                                        | Override the version of curl to be installed.                                                                                                                           | `latest` |
| hooks.podSecurityContext | The Kubernetes SecurityContext for the chart hooks `Pod` | `{}` |
| image.registry | The repository to download the image from. Set `TELEPRESENCE_REGISTRY=image.registry` locally if changing this value. | `ghcr.io/telepresenceio` |
| hooks.resources | Define resource requests and limits for the chart hooks | `{}` |
| hooks.securityContext | The Kubernetes SecurityContext for the chart hooks `Container` | securityContext |
| image.imagePullSecrets | The `Secret` storing any credentials needed to access the image in a private registry. | `[]` |
| image.name | The name of the image to use for the traffic-manager | `tel2` |
| image.pullPolicy | How the `Pod` will attempt to pull the image. | `IfNotPresent` |
| image.tag | Override the version of the Traffic Manager to be installed. | `""` (Defined in `appVersion` Chart.yaml) |
| livenessProbe                                         | Define livenessProbe for the Traffic Manager.                                                                                                                           | `{}` |
| logLevel | Define the logging level of the Traffic Manager | `debug` |
| managerRbac.create | Create RBAC resources for traffic-manager with this release. | `true` |
| ~~managerRbac.namespaced~~ | Whether the traffic manager should be restricted to specific namespaces. Deprecated and no longer used. | `false` |
| ~~managerRbac.namespaces~~ | Which namespaces the traffic manager should be restricted to. Deprecated, use top level `namespaces` or `namespaceSelector` | `[]` |
| maxNamespaceSpecificWatchers | Threshold controlling when the traffic-manager switches from using watchers for each managed namespace to using cluster-wide watchers. | `10` |
| namespaces | Declares a fixed set of managed namespaces. Mutually exclusive to `namespaceSelector` | `[]` |
| namespaceSelector | Declares the managed namespace using `matchLabels` and `matchExpressions`. Mutually exclusive to `namespaces` | `{}` |
| nodeSelector | Define which `Node`s you want to the Traffic Manager to be deployed to. | `{}` |
| podAnnotations | Annotations for the Traffic Manager `Pod` | `{}` |
| podLabels | Labels for the Traffic Manager `Pod` | `{}` |
| podCIDRs | Verbatim list of CIDRs that the cluster uses for pods. Only valid together with `podCIDRStrategy: environment` | `[]` |
| podCIDRStrategy | Define the strategy that the traffic-manager uses to discover what CIDRs the cluster uses for pods | `auto` |
| podSecurityContext | The Kubernetes SecurityContext for the `Pod` | `{}` |
| priorityClassName | Name of the existing priority class to be used | `""` |
| rbac.only                                             | Only create the RBAC resources and omit the traffic-manager.                                                                                                           | `false` |
| readinessProbe                                        | Define readinessProbe for the Traffic Manager.                                                                                                                          | `{}` |
| resources                                             | Define resource requests and limits for the Traffic Manager.                                                                                                           | `{}` |
| schedulerName | Specify a scheduler for Traffic Manager `Pod` and hooks `Pod`. | |
| securityContext | The Kubernetes SecurityContext for the `Deployment` | `{"readOnlyRootFilesystem": true, "runAsNonRoot": true, "runAsUser": 1000}` |
| service.type | The type of `Service` for the Traffic Manager. | `ClusterIP` |
| telepresenceAPI.port | The port on agent's localhost where the Telepresence API server can be found | |
| timeouts.agentArrival | The time that the traffic-manager will wait for the traffic-agent to arrive | `30s` |
| tolerations | Define tolerations for the Traffic Manager to ignore `Node` taints. | `[]` |
| workloads.argoRollouts.enabled | Enable/Disable the argo-rollouts integration. | `false` |
| workloads.deployments.enabled | Enable/Disable the support for Deployments. | `true` |
| workloads.replicaSets.enabled | Enable/Disable the support for ReplicaSets. | `true` |
| workloads.statefulSets.enabled | Enable/Disable the support for StatefulSets. | `true` |
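As an illustration (the specific values here are arbitrary), a handful of these parameters could be overridden at install time:

```bash
# Illustrative overrides only; the parameter names come from the table above.
$ telepresence helm install \
    --set logLevel=info \
    --set 'namespaces={dev,staging}' \
    --set agent.logLevel=debug
```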
### RBAC
Telepresence requires cluster-admin permissions for installation, but restricted RBAC roles can
be used to give users access to create intercepts if they are not cluster
admins.
The chart gives you the ability to create these RBAC roles for your users and
to give access to the entire cluster or restrict them to certain namespaces.
You can also create a separate release for managing RBAC by setting
`Values.rbac.only: true`.
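A minimal sketch of such an RBAC-only release, assuming the subject below is the user being granted access (the name is illustrative):

```bash
# Sketch only: create the client RBAC resources without deploying a traffic-manager.
$ telepresence helm install \
    --set rbac.only=true \
    --set clientRbac.create=true \
    --set 'clientRbac.subjects[0].kind=User' \
    --set 'clientRbac.subjects[0].name=jane@example.com' \
    --set 'clientRbac.subjects[0].apiGroup=rbac.authorization.k8s.io'
```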
### Namespace-scoped traffic manager
Telepresence's Helm chart supports installing a Traffic Manager at the namespace scope.
You might want to do this if you have multiple namespaces, say representing multiple different environments, and would like their Traffic Managers to be isolated from one another.
To do this, set `managerRbac.namespaced=true` and `managerRbac.namespaces={a,b,c}` to manage namespaces `a`, `b` and `c`.
**NOTE** Do not install namespace-scoped traffic managers and a cluster-scoped traffic manager in the same cluster!
#### Namespace collision detection
The Telepresence Helm chart will try to prevent namespace-scoped Traffic Managers from managing the same namespaces.
It will do this by creating a ConfigMap, called `traffic-manager-claim`, in each namespace that a given install manages.
So, for example, suppose you install one Traffic Manager to manage namespaces `a` and `b`, as:
```bash
$ telepresence helm install --namespace a --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={a,b}'
```
You might then attempt to install another Traffic Manager to manage namespaces `b` and `c`:
```bash
$ telepresence helm install --namespace c --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={b,c}'
```
This would fail with an error:
```
Error: rendered manifests contain a resource that already exists. Unable to continue with install: ConfigMap "traffic-manager-claim" in namespace "b" exists and cannot be imported into the current release: invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-namespace" must equal "c": current value is "a"
```
To resolve the error, remove the overlap by dropping `b` from either the first install or the second.
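For example, the second install could be changed to claim only `c`:
```bash
$ telepresence helm install --namespace c --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={c}'
```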
#### Pod CIDRs
The traffic manager is responsible for keeping track of what CIDRs the cluster uses for the pods. The Telepresence client uses this
information to configure the network so that it provides access to the pods. In some cases, the traffic-manager will not be able to retrieve
this information, or will do it in a way that is inefficient. To remedy this, the strategy that the traffic manager uses can be configured
using the `podCIDRStrategy`.
| Value | Meaning |
| -------------- | ------------------------------------------------------------------------------------------------------------------------- |
| `auto` | First try `nodePodCIDRs` and if that fails, try `coverPodIPs` |
| `coverPodIPs` | Obtain all IPs from the `podIP` and `podIPs` of all `Pod` resource statuses and calculate the CIDRs needed to cover them. |
| `environment` | Pick the CIDRs from the traffic manager's `POD_CIDRS` environment variable. Use `podCIDRs` to set that variable. |
| `nodePodCIDRs` | Obtain the CIDRs from the `podCIDR` and `podCIDRs` of all `Node` resource specifications. |
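
For example, if automatic discovery does not work well in a particular cluster, the CIDRs can be supplied explicitly through the `environment` strategy (the subnet below is only an illustration):

```bash
# Illustrative values only: hand the pod CIDRs to the traffic-manager directly.
$ telepresence helm install \
    --set podCIDRStrategy=environment \
    --set 'podCIDRs={10.244.0.0/16}'
```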

View File

@@ -0,0 +1,18 @@
--------------------------------------------------------------------------------
Congratulations!
You have successfully installed the Traffic Manager component of Telepresence!
Now your users will be able to `telepresence connect` to this Cluster and create
intercepts for their services!
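For example, a developer workflow from a workstation might then look like this (service name and port are illustrative):
```bash
# Illustrative workflow once the Traffic Manager is running in the cluster.
$ telepresence connect
$ telepresence intercept my-service --port 8080
```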
--------------------------------------------------------------------------------
Next Steps
--------------------------------------------------------------------------------
- Take a look at our RBAC documentation for setting up the minimal required RBAC
roles for your users at https://www.telepresence.io/docs/reference/rbac
- Ensure that you are keeping up to date with Telepresence releases
https://github.com/telepresenceio/telepresence/releases so that your Traffic
Manager is the same version as the telepresence client your users are running!

View File

@@ -0,0 +1,220 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "telepresence.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Traffic Manager deployment/service name - as of v2.20.3, must be "traffic-manager" to align with code base.
*/}}
{{- define "traffic-manager.name" -}}
{{- $name := default "traffic-manager" }}
{{- print $name }}
{{- end -}}
{{- /*
Traffic Manager Namespace
*/}}
{{- define "traffic-manager.namespace" -}}
{{- if .Values.isCI }}
{{- print "ambassador" }}
{{- else }}
{{- printf "%s" .Release.Namespace }}
{{- end }}
{{- end -}}
{{- /*
private.namespace-list extracts the list of namespace names from the namespaces variable.
For backward compatibility, it will also consider names from the deprecated managerRbac.namespaces.
It's an error if namespaces and managerRbac.namespaces both have values.
*/}}
{{- define "private.namespace-list" }}
{{- $names := .Values.namespaces }}
{{- if .Values.managerRbac.namespaces }}
{{- if $names }}
{{- fail "namespaces and managerRbac.namespaces are mutually exclusive" }}
{{- end }}
{{- $names = .Values.managerRbac.namespaces }}
{{- end }}
{{- range $names }}
{{- if not (regexMatch `^[a-z0-9]([a-z0-9-]*[a-z0-9])?$` .) }}
{{- fail (printf "namespace %q is not a valid RFC 1123 namespace name" .) }}
{{- end }}
{{- else }}
{{ $names = list }}
{{- end }}
{{- toJson (uniq ($names)) }}
{{- end }}
{{- define "private.namespaceSelector" }}
{{- $labels := list }}
{{- $matches := list }}
{{- with .Values.namespaceSelector }}
{{- with .matchLabels }}
{{- $labels = . }}
{{- end }}
{{- with .matchExpressions }}
{{- $matches = . }}
{{- end }}
{{- end }}
{{- with fromJsonArray (include "private.namespace-list" $) }}
{{- if (or $labels $matches) }}{{ fail "namespaces and namespaceSelector are mutually exclusive" }}{{ end }}
{{- $matches = append $matches (dict "key" "kubernetes.io/metadata.name" "operator" "In" "values" .) }}
{{- end }}
{{- $selector := dict }}
{{- with $labels }}
{{- $selector = set $selector "matchLabels" . }}
{{- end }}
{{- with $matches }}
{{- $selector = set $selector "matchExpressions" . }}
{{- end }}
{{- toJson $selector }}
{{- end }}
{{- /*
traffic-manager.namespaceSelector extracts the selector to use when selecting namespaces.
This selector will either include the namespaceSelector variable or include namespaces returned by the
private.namespace-list definition. It will fail if both of them have values.
The selector will default to the deprecated agentInjector.webhook.namespaceSelector when neither the namespaceSelector
nor the private.namespace-list definition has any value.
A selector can be dynamic or static. This in turn controls if telepresence is "cluster-wide" or "namespaced". A dynamic
selector requires cluster-wide access for the traffic-manager, and only a static selector can serve as base when
installing Role/RoleBinding pairs.
A selector is considered static if it meets the following conditions:
- The selector must have exactly one element in the `matchLabels` or the `matchExpression`
list (if the element is in the `matchLabels` list, it is normalized into "key in [value]").
- The element must meet the following criteria:
The `key` of the match expression must be "kubernetes.io/metadata.name".
The `operator` of the match expression must be "In" (case sensitive).
The `values` list of the match expression must contain at least one value.
*/}}
{{- define "traffic-manager.namespaceSelector" }}
{{- $selector := mustFromJson (include "private.namespaceSelector" $) }}
{{- $legacy := false }}
{{- if not $selector }}
{{- with .Values.agentInjector.webhook.namespaceSelector }}
{{- $legacy = true }}
{{- $selector = . }}
{{- end }}
{{- end }}
{{- if not (or $legacy (fromJsonArray (include "traffic-manager.namespaces" $))) }}
{{- /* Ensure that this dynamic selector rejects "kube-system" and "kube-node-lease" */}}
{{- $mes := $selector.matchExpressions }}
{{- if not $mes }}
{{- $mes = list }}
{{- end }}
{{- $selector = set $selector "matchExpressions" (append $mes
(dict "key" "kubernetes.io/metadata.name" "operator" "NotIn" "values" (list "kube-system" "kube-node-lease")))
}}
{{- end }}
{{- toJson $selector }}
{{- end }}
{{- /*
traffic-manager.namespaced will yield the string "true" if the traffic-manager.namespaceSelector is static.
*/}}
{{- define "traffic-manager.namespaced" }}
{{- if fromJsonArray (include "traffic-manager.namespaces" $) }}
{{- true }}
{{- end }}
{{- end }}
{{- /*
traffic-manager.namespaces will return a list of namespaces, provided that the traffic-manager.namespaceSelector is static.
*/}}
{{- define "traffic-manager.namespaces" }}
{{- $namespaces := list }}
{{- with mustFromJson (include "private.namespaceSelector" $) }}
{{- if and .matchExpressions (eq (len .matchExpressions) 1) (not .matchLabels) }}
{{- with index .matchExpressions 0}}
{{- if (and (eq .operator "In") (eq .key "kubernetes.io/metadata.name")) }}
{{- $namespaces = .values }}
{{- end }}
{{- end }}
{{- end }}
{{- if and .matchLabels (eq (len .matchLabels) 1) (not .matchExpressions) }}
{{- with get .matchLabels "kubernetes.io/metadata.name" }}
{{- $namespaces = list . }}
{{- end }}
{{- end }}
{{- end }}
{{- toJson $namespaces }}
{{- end }}
{{- /*
Create chart name and version as used by the chart label.
*/}}
{{- define "telepresence.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- /*
Common labels
*/}}
{{- define "telepresence.labels" -}}
{{ include "telepresence.selectorLabels" $ }}
helm.sh/chart: {{ include "telepresence.chart" $ }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- /* This value is intentionally undocumented -- it's used by the telepresence binary to determine ownership of the release */}}
{{- if .Values.createdBy }}
app.kubernetes.io/created-by: {{ .Values.createdBy }}
{{- else }}
app.kubernetes.io/created-by: {{ .Release.Service }}
{{- end }}
{{- end }}
{{- /*
Selector labels
*/}}
{{- define "telepresence.selectorLabels" -}}
app: traffic-manager
telepresence: manager
{{- end }}
{{- /*
Client RBAC name suffix
*/}}
{{- define "telepresence.clientRbacName" -}}
{{ printf "%s-%s" (include "telepresence.name" $) (include "traffic-manager.namespace" $) }}
{{- end -}}
{{- /*
RBAC rules required to create an intercept in a namespace; excludes any rules that are always cluster wide.
*/}}
{{- define "telepresence.clientRbacInterceptRules" -}}
{{- /* Mandatory. Controls namespace access command completion experience */}}
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","list"] {{- /* "list" is only necessary if the client should be able to gather the pod logs */}}
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
{{- /* All traffic will be routed via the traffic-manager unless a portforward can be created directly to a pod */}}
- apiGroups: [""]
resources: ["pods/portforward"]
verbs: ["create"]
{{- if and .Values.clientRbac .Values.clientRbac.ruleExtras }}
{{ template "clientRbac-ruleExtras" . }}
{{- end }}
{{- end }}
{{/*
Kubernetes version
*/}}
{{- define "kube.version.major" }}
{{- $version := regexFind "^[0-9]+" .Capabilities.KubeVersion.Major -}}
{{- printf "%s" $version -}}
{{- end -}}
{{- define "kube.version.minor" }}
{{- $version := regexFind "^[0-9]+" .Capabilities.KubeVersion.Minor -}}
{{- printf "%s" $version -}}
{{- end -}}

View File

@@ -0,0 +1,140 @@
{{- if and (not (and .Values.rbac .Values.rbac.only)) .Values.agentInjector.enabled }}
{{- $namespaceSelector := mustFromJson (include "traffic-manager.namespaceSelector" $) }}
{{- /*
Perform a check that the new namespaceSelector doesn't select namespaces that are
already managed by some other traffic-manager.
*/}}
{{- $namespaces := (lookup "v1" "Namespace" "" "").items }}
{{- $configs := dict }}
{{- $cmName := include "traffic-manager.name" $ }}
{{- $cmNs := include "traffic-manager.namespace" $}}
{{- /* Find all existing traffic-manager configmaps and their namespaceSelectors */}}
{{- range $namespaces }}
{{- $ns := .metadata.name }}
{{- $cm := lookup "v1" "ConfigMap" $ns $cmName }}
{{- with $cm }}
{{- with fromYaml (get .data "namespace-selector.yaml" ) }}
{{- $configs = set $configs $ns . }}
{{- end }}
{{- end }}
{{- end }}
{{- /* No use testing if the added selector is the only one */}}
{{- if $configs }}
{{- $configs = set $configs $cmNs $namespaceSelector }}
{{- /* Validate that no selector overlaps with another */}}
{{- $allManagedNamespaces := dict }}
{{- range $configNs, $config := $configs }}
{{- $rqs := $config.matchExpressions }}
{{- /* Normalise the selector, i.e. turn each matchLabel into a matchRequirement */}}
{{- range $key, $value := $config.matchLabels }}
{{- $rqs = append $rqs (dict "key" $key "operator" "In" "values" (list $value))}}
{{- end }}
{{- /* Figure out what namespaces this selector selects, and for each one, assert that it's not selected already */}}
{{- range $namespaces }}
{{- $ns := .metadata.name }}
{{- $labels := .metadata.labels }}
{{- $isMatch := true }}
{{- range $rqs }}
{{- $rqMatch := false }}
{{- $val := get $labels .key }}
{{- if eq .operator "In" }}
{{- $rqMatch = has $val .values }}
{{- else if eq .operator "NotIn" }}
{{- $rqMatch = not (has $val .values) }}
{{- else if eq .operator "Exists" }}
{{- $rqMatch = not (eq $val "") }}
{{- else if eq .operator "DoesNotExist" }}
{{- $rqMatch = eq $val "" }}
{{- else }}
{{- fail (printf "unsupported labelSelectorOperator %s" .operator) }}
{{- end }}
{{- if not $rqMatch }}
{{- $isMatch = false }}
{{- break }}
{{- end }}
{{- end }}
{{- if $isMatch }}
{{- $conflictingConfig := get $allManagedNamespaces $ns }}
{{- if $conflictingConfig }}
{{- if eq $conflictingConfig $cmNs }}
{{- $conflictingConfig = $configNs }}
{{- end }}
{{- fail (printf "traffic-manager in namespace %s already manages namespace %s" $conflictingConfig $ns) }}
{{- end }}
{{- $allManagedNamespaces = set $allManagedNamespaces $ns $configNs }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- $altNames := list ( printf "agent-injector.%s" (include "traffic-manager.namespace" $)) ( printf "agent-injector.%s.svc" (include "traffic-manager.namespace" $)) -}}
{{- $genCA := genCA "agent-injector-ca" 365 -}}
{{- $genCert := genSignedCert "agent-injector" nil $altNames 365 $genCA -}}
{{- $secretData := (lookup "v1" "Secret" (include "traffic-manager.namespace" $) .Values.agentInjector.secret.name).data -}}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
{{- if eq .Values.agentInjector.certificate.method "certmanager" }}
annotations:
cert-manager.io/inject-ca-from: {{ include "traffic-manager.namespace" $}}/{{ .Values.agentInjector.secret.name }}
{{- end }}
name: {{ .Values.agentInjector.webhook.name }}-{{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
webhooks:
{{- with .Values.agentInjector.webhook.admissionReviewVersions }}
- admissionReviewVersions:
{{- toYaml . | nindent 2 }}
{{- end }}
clientConfig:
{{- if not (eq .Values.agentInjector.certificate.method "certmanager") }}
{{- if and ($secretData) (or (not .Values.agentInjector.certificate.regenerate) (eq .Values.agentInjector.certificate.method "supplied") )}}
caBundle: {{ or (get $secretData "ca.crt") (get $secretData "ca.pem") }}
{{- else }}
caBundle: {{ $genCA.Cert | b64enc }}
{{- end }}
{{- end }}
service:
name: {{ .Values.agentInjector.name }}
namespace: {{ include "traffic-manager.namespace" $ }}
path: {{ .Values.agentInjector.webhook.servicePath }}
port: {{ .Values.agentInjector.webhook.port }}
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
- DELETE
resources:
- pods
scope: '*'
failurePolicy: {{ .Values.agentInjector.webhook.failurePolicy }}
reinvocationPolicy: {{ .Values.agentInjector.webhook.reinvocationPolicy }}
name: agent-injector-{{ include "traffic-manager.namespace" $ }}.telepresence.io
sideEffects: {{ .Values.agentInjector.webhook.sideEffects }}
timeoutSeconds: {{ .Values.agentInjector.webhook.timeoutSeconds }}
namespaceSelector:
{{- toYaml $namespaceSelector | nindent 4 }}
{{- if not (or (eq .Values.agentInjector.certificate.method "certmanager") (eq .Values.agentInjector.certificate.method "supplied")) }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.agentInjector.secret.name }}
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
data:
{{- if and ($secretData) (not .Values.agentInjector.certificate.regenerate) }}
ca.crt: {{ or (get $secretData "ca.crt") (get $secretData "ca.pem") }}
tls.crt: {{ or (get $secretData "tls.crt") (get $secretData "crt.pem") }}
tls.key: {{ or (get $secretData "tls.key") (get $secretData "key.pem") }}
{{- else }}
ca.crt: {{ $genCA.Cert | b64enc }}
tls.crt: {{ $genCert.Cert | b64enc }}
tls.key: {{ $genCert.Key | b64enc }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,14 @@
{{- if and (eq .Values.agentInjector.certificate.method "certmanager") .Values.agentInjector.enabled }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ .Values.agentInjector.secret.name }}
spec:
secretName: {{ .Values.agentInjector.secret.name }}
dnsNames:
- {{ (printf "%s.%s" .Values.agentInjector.name .Release.Namespace ) }}
- {{ (printf "%s.%s.svc" .Values.agentInjector.name .Release.Namespace ) }}
{{- with .Values.agentInjector.certificate.certmanager }}
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,38 @@
{{- /*
These are the cluster-wide rbac roles + bindings that will be used by users
who want to use telepresence once its components have been set
up in the cluster.
*/}}
{{- with .Values.clientRbac }}
{{- if (and .create (not (or .namespaces (include "traffic-manager.namespaced" $)))) }}
{{- $roleName := include "telepresence.clientRbacName" $ }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $roleName }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list", "watch"]
{{- include "telepresence.clientRbacInterceptRules" $ }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $roleName }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
subjects:
{{ toYaml .subjects }}
roleRef:
kind: ClusterRole
name: {{ $roleName }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}

View File

@@ -0,0 +1,43 @@
{{- with .Values.clientRbac }}
{{- if .create }}
{{- /*
Client must have the following RBAC in the traffic-manager.namespace to establish
a port-forward to the traffic-manager pod.
*/}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traffic-manager-connect
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["services"]
resourceNames:
- {{ include "traffic-manager.name" $ }}
verbs: ["get"]
- apiGroups: [""]
resources: ["pods/portforward"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: traffic-manager-connect
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
subjects:
{{ toYaml .subjects }}
roleRef:
apiGroup: rbac.authorization.k8s.io
name: traffic-manager-connect
kind: Role
{{- end }}
{{- end }}

View File

@@ -0,0 +1,85 @@
{{- /*
These are the namespace-scoped rbac roles + bindings that will be used by users
who want to use telepresence once its components have been set
up in the cluster.
*/}}
{{- with .Values.clientRbac }}
{{- if .create }}
{{- $subjects := .subjects }}
{{- if (not $subjects) }}
{{- /* fail comes out really ugly if we just do fail "the message here" */}}
{{- $msg := "You must set clientRbac.subjects to a list of valid rbac subjects. See the kubernetes docs for more: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-subjects" }}
{{- fail $msg }}
{{- end }}
{{- $namespaces := .namespaces }}
{{- if not $namespaces }}
{{ $namespaces = fromJsonArray (include "traffic-manager.namespaces" $) }}
{{- end }}
{{- $name := include "telepresence.clientRbacName" $ }}
{{- $labels := include "telepresence.labels" $ | nindent 4 }}
{{- range $namespaces }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $name }}
namespace: {{ . }}
labels:
{{- $labels }}
rules:
{{ include "telepresence.clientRbacInterceptRules" $ }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $name }}
namespace: {{ . }}
labels:
{{- $labels }}
subjects:
{{- toYaml $subjects | nindent 0}}
roleRef:
kind: Role
name: {{ $name }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- $managerNamespace := include "traffic-manager.namespace" $ }}
{{- if and $namespaces (not (has $managerNamespace $namespaces)) }}
{{- /*
This is required only if the client should be permitted to gather the traffic-manager logs, and it
is only required when the traffic-manager isn't managing its own namespace.
*/}}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traffic-manager-logs
namespace: {{ $managerNamespace }}
labels:
{{- $labels }}
rules:
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: traffic-manager-logs
namespace: {{ $managerNamespace }}
labels:
{{- $labels }}
subjects:
{{ toYaml $subjects }}
roleRef:
kind: Role
name: traffic-manager-logs
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,324 @@
{{- with .Values }}
{{- if not (and .rbac .rbac.only) }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "traffic-manager.name" $ }}
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
spec:
replicas: {{ .replicaCount }}
selector:
matchLabels:
{{- include "telepresence.selectorLabels" $ | nindent 6 }}
template:
metadata:
{{- with .podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "telepresence.selectorLabels" $ | nindent 8 }}
{{- with .podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .image.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .podSecurityContext | nindent 8 }}
{{- if .hostNetwork }}
hostNetwork: true
{{- end }}
containers:
- name: {{ include "traffic-manager.name" $ }}
securityContext:
{{- toYaml .securityContext | nindent 12 }}
{{- with .image }}
image: "{{ .registry }}/{{ .name }}:{{ .tag | default $.Chart.AppVersion }}"
imagePullPolicy: {{ .pullPolicy }}
{{- end }}
env:
- name: LOG_LEVEL
value: {{ .logLevel }}
{{- with .image }}
- name: REGISTRY
value: "{{ .registry }}"
{{- end }}
- name: SERVER_PORT
value: {{ .apiPort | quote }}
- name: POD_CIDR_STRATEGY
value: {{ .podCIDRStrategy }}
{{- with .podCIDRs }}
- name: POD_CIDRS
value: "{{ join " " . }}"
{{- end }}
{{- if .agentInjector.enabled }}
- name: MUTATOR_WEBHOOK_PORT
value: {{ .agentInjector.webhook.port | quote }}
- name: AGENT_INJECTOR_SECRET
{{- if eq .agentInjector.certificate.accessMethod "mount" }}
value: /var/run/secrets/tls
{{- else }}
value: {{ .agentInjector.secret.name }}
{{- end }}
{{- end }}
{{- with .telepresenceAPI }}
{{- if .port }}
- name: AGENT_REST_API_PORT
value: {{ .port | quote }}
{{- end }}
{{- end }}
{{- with .grpc }}
{{- if .maxReceiveSize }}
- name: GRPC_MAX_RECEIVE_SIZE
value: {{ .maxReceiveSize }}
{{- if and .connectionTTL (not $.Values.client.connectionTTL) }}
- name: CLIENT_CONNECTION_TTL
value: {{ .connectionTTL }}
{{- end }}
{{- end }}
{{- end }}
{{- if .workloads }}
{{- with .workloads }}
- name: ENABLED_WORKLOAD_KINDS
value: >-
{{- if or (not .deployments) .deployments.enabled }}
Deployment
{{- end }}
{{- if or (not .statefulSets) .statefulSets.enabled }}
StatefulSet
{{- end }}
{{- if or (not .replicaSets) .replicaSets.enabled }}
ReplicaSet
{{- end }}
{{- if and .argoRollouts .argoRollouts.enabled }}
Rollout
{{- end }}
{{- end }}
{{- else }}
- name: ENABLED_WORKLOAD_KINDS
value: Deployment StatefulSet ReplicaSet
{{- end }}
{{- if .agentInjector.enabled }}
{{- /*
Traffic agent injector configuration
*/}}
- name: AGENT_ARRIVAL_TIMEOUT
value: {{ quote (default "30s" .timeouts.agentArrival) }}
{{- with .agentInjector }}
- name: AGENT_INJECT_POLICY
value: {{ .injectPolicy }}
- name: AGENT_INJECTOR_NAME
value: {{ .name | quote }}
{{- end }}
{{- /*
Traffic agent configuration
*/}}
{{- with .agent }}
{{- if .logLevel }}
- name: AGENT_LOG_LEVEL
value: {{ .logLevel }}
{{- end }}
{{- if .port }}
- name: AGENT_PORT
value: {{ .port | quote }}
{{- end }}
{{- if .appProtocolStrategy }}
- name: AGENT_APP_PROTO_STRATEGY
value: {{ .appProtocolStrategy }}
{{- end }}
{{- if .resources }}
- name: AGENT_RESOURCES
value: '{{ toJson .resources }}'
{{- end }}
{{- if .initResources }}
- name: AGENT_INIT_RESOURCES
value: '{{ toJson .initResources }}'
{{- end }}
{{- if .mountPolicies }}
- name: AGENT_MOUNT_POLICIES
value: '{{ toJson .mountPolicies }}'
{{- end }}
{{- with .initContainer }}
- name: AGENT_INIT_CONTAINER_ENABLED
value: {{ .enabled | quote }}
{{- end }}
{{- with .image }}
{{- if .name }}
- name: AGENT_IMAGE_NAME
value: {{ .name }}
{{- end }}
{{- if .tag }}
- name: AGENT_IMAGE_TAG
value: {{ .tag }}
{{- end }}
{{- if .registry }}
- name: AGENT_REGISTRY
value: {{ .registry }}
{{- end }}
{{- with .pullSecrets }}
- name: AGENT_IMAGE_PULL_SECRETS
value: '{{ toJson . }}'
{{- end }}
- name: AGENT_IMAGE_PULL_POLICY
value: {{ .pullPolicy }}
{{- end }}
{{- /* must check against nil. An empty security context is a valid override */}}
{{- if not (eq .securityContext nil) }}
- name: AGENT_SECURITY_CONTEXT
value: '{{ toJson .securityContext }}'
{{- end }}
{{- /* must check against nil. An empty security context is a valid override */}}
{{- if not (eq .initSecurityContext nil) }}
- name: AGENT_INIT_SECURITY_CONTEXT
value: '{{ toJson .initSecurityContext }}'
{{- end }}
{{- end }}
{{- with fromJsonArray (include "traffic-manager.namespaces" $) }}
{{- /*
This environment variable is not used, it's here to force a redeploy of the traffic manager when the list
changes, because it updates roles and rolebindings and potentially also changes from roles to clusterroles or
vice versa.
*/}}
- name: NOT_USED_NSS
value: {{ toJson . | quote }}
{{- end }}
{{- end }}
{{- if .prometheus.port }} # 0 is false
- name: PROMETHEUS_PORT
value: "{{ .prometheus.port }}"
{{- end }}
- name: MAX_NAMESPACE_SPECIFIC_WATCHERS
value: {{.maxNamespaceSpecificWatchers | quote }}
- name: MANAGER_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
{{- /*
Client configuration
*/}}
{{- with .client }}
{{- if .connectionTTL }}
- name: CLIENT_CONNECTION_TTL
value: {{ .connectionTTL }}
{{- end }}
{{- with .routing }}
{{- if .alsoProxySubnets }}
- name: CLIENT_ROUTING_ALSO_PROXY_SUBNETS
value: "{{ join " " .alsoProxySubnets }}"
{{- end }}
{{- if .neverProxySubnets }}
- name: CLIENT_ROUTING_NEVER_PROXY_SUBNETS
value: "{{ join " " .neverProxySubnets }}"
{{- end }}
{{- if .allowConflictingSubnets }}
- name: CLIENT_ROUTING_ALLOW_CONFLICTING_SUBNETS
value: "{{ join " " .allowConflictingSubnets }}"
{{- end }}
{{- end }}
{{- with .dns }}
{{- with .excludeSuffixes }}
- name: CLIENT_DNS_EXCLUDE_SUFFIXES
value: "{{ join " " . }}"
{{- end }}
{{- with .includeSuffixes }}
- name: CLIENT_DNS_INCLUDE_SUFFIXES
value: "{{ join " " . }}"
{{- end }}
{{- end }}
{{- end }}
{{- with .compatibility }}
{{- if .version }}
- name: COMPATIBILITY_VERSION
value: {{ .version }}
{{- end }}
{{- end }}
{{- if and .trafficManager .trafficManager.envTemplate }}
{{- template "traffic-manager-env" . }}
{{- end }}
ports:
- name: api
containerPort: {{ .apiPort }}
- name: https
containerPort: {{ .agentInjector.webhook.port }}
{{- if .prometheus.port }} # 0 is false
- name: prometheus
containerPort: {{ .prometheus.port }}
{{- end }}
{{- with .livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if eq .agentInjector.certificate.accessMethod "mount" }}
volumeMounts:
{{- if .agentInjector.enabled }}
- name: tls
mountPath: /var/run/secrets/tls
readOnly: true
{{- end }}
{{- if and .trafficManager .trafficManager.mountsTemplate }}
{{- template "traffic-manager-mounts" . }}
{{- end }}
{{- else }}
{{- if and .trafficManager .trafficManager.mountsTemplate }}
volumeMounts:
{{- template "traffic-manager-mounts" . }}
{{- end }}
{{- end }}
{{- with .schedulerName }}
schedulerName: {{ . }}
{{- end }}
{{- with .nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- if eq .agentInjector.certificate.accessMethod "mount" }}
volumes:
{{- if .agentInjector.enabled }}
- name: tls
secret:
defaultMode: 420
secretName: {{ .agentInjector.secret.name }}
{{- end }}
{{- if and .trafficManager .trafficManager.volsTemplate }}
{{- template "traffic-manager-vols" . }}
{{- end }}
{{- else }}
{{- if and .trafficManager .trafficManager.volsTemplate }}
volumes:
{{- template "traffic-manager-vols" . }}
{{- end }}
{{- end }}
serviceAccount: traffic-manager
serviceAccountName: traffic-manager
{{- end }}
{{- end }}

View File

@@ -0,0 +1,8 @@
{{- if and (eq .Values.agentInjector.certificate.method "certmanager") .Values.agentInjector.enabled }}
apiVersion: cert-manager.io/v1
kind: {{ .Values.agentInjector.certificate.certmanager.issuerRef.kind }}
metadata:
name: {{ .Values.agentInjector.certificate.certmanager.issuerRef.name }}
spec:
selfSigned: {}
{{- end }}

View File

@@ -0,0 +1,76 @@
{{- if and (not (and .Values.rbac .Values.rbac.only)) .Values.agentInjector.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: uninstall-agents
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
{{- /* This is what defines this resource as a hook. Without this line, the job is considered part of the release. */}}
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
spec:
backoffLimit: 1
template:
metadata:
name: uninstall-agents
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
securityContext:
{{- toYaml .Values.hooks.podSecurityContext | nindent 8 }}
restartPolicy: Never
{{- with .Values.hooks.curl.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: uninstall-agents
securityContext:
{{- if .Values.hooks.securityContext }}
{{- toYaml .Values.hooks.securityContext | nindent 12 }}
{{- else }}
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
image: "{{ .Values.hooks.curl.registry }}/{{ .Values.hooks.curl.image }}:{{ .Values.hooks.curl.tag }}"
imagePullPolicy: {{ .Values.hooks.curl.pullPolicy }}
volumeMounts:
- name: secret-volume
mountPath: /secret
env:
- name: CURL_CA_BUNDLE
value: /secret/ca.crt
resources:
{{- toYaml .Values.hooks.resources | nindent 12 }}
command:
- sh
- -c
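# This DELETE call asks the agent-injector to uninstall the injected traffic-agents;
# the trailing '|| exit 0' keeps an unreachable or failing webhook from blocking deletion.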
args:
- 'curl --fail --connect-timeout 5 --max-time 60 --request DELETE https://{{ .Values.agentInjector.name }}.{{ include "traffic-manager.namespace" $ }}:{{ .Values.agentInjector.webhook.port }}/uninstall || exit 0'
volumes:
- name: secret-volume
secret:
secretName: {{ .Values.agentInjector.secret.name }}
{{- with .Values.schedulerName }}
schedulerName: {{ . }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,56 @@
{{- with .Values }}
{{- if not (and .rbac .rbac.only) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "traffic-manager.name" $ }}
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
spec:
type: {{ .service.type }}
clusterIP: None
ports:
- name: api
port: {{ .apiPort }}
targetPort: api
selector:
{{- include "telepresence.selectorLabels" $ | nindent 4 }}
{{- if .agentInjector.enabled }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .agentInjector.name }}
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
spec:
type: {{ .service.type }}
ports:
- name: https
port: {{ .agentInjector.webhook.port }}
targetPort: https
selector:
{{- include "telepresence.selectorLabels" $ | nindent 4 }}
{{- end }}
{{- if .prometheus.port }} # 0 is false
---
apiVersion: v1
kind: Service
metadata:
name: telepresence-prometheus
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
name: telepresence-prometheus
spec:
type: {{ .service.type }}
ports:
- name: telepresence-prometheus
port: 80
targetPort: prometheus
selector:
{{- include "telepresence.selectorLabels" $ | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if not (and .Values.rbac .Values.rbac.only) }}
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "traffic-manager.name" $ }}-test-connection"
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
{{- with .Values.hooks.busybox.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
containers:
- name: wget
image: "{{ .Values.hooks.busybox.registry }}/{{ .Values.hooks.busybox.image }}:{{ .Values.hooks.busybox.tag }}"
command: ['wget']
args: ['{{ include "traffic-manager.name" $ }}:8081']
restartPolicy: Never
{{- end }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "traffic-manager.name" $ }}
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
data:
{{- if .Values.client }}
client.yaml: |
{{- toYaml .Values.client | nindent 4 }}
{{- end }}
{{- with .Values.intercept }}
{{- if .environment }}
agent-env.yaml: |
{{- toYaml .environment | nindent 4 }}
{{- end }}
{{- end }}
namespace-selector.yaml: |
{{- toYaml (mustFromJson (include "traffic-manager.namespaceSelector" $)) | nindent 4 }}

View File

@ -0,0 +1,104 @@
{{- with .Values }}
{{- if and .managerRbac.create (not (include "traffic-manager.namespaced" $)) }}
{{- /*
This file contains all cluster-scoped permissions that the traffic-manager needs.
It is larger when namespaced is false and smaller when it is true.
It is also likely to grow over time as more functionality moves from the clients'
domain into the traffic-manager. The upside is that this requires fewer
permissions in clientRbac.yaml.
*/}}
{{- $roleName := (printf "traffic-manager-%s" (include "traffic-manager.namespace" $)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $roleName }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- update {{/* Only needed for upgrade of older versions */}}
- apiGroups:
- ""
resources:
- nodes
- services
- namespaces
- pods
verbs:
- list
- get
- watch
{{- if .agentInjector.enabled }}
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
{{- end }}
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- "apps"
resources:
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
{{- if .agentInjector.enabled }}
- patch
{{- end }}
{{- if .workloads.argoRollouts.enabled }}
- apiGroups:
- "argoproj.io"
resources:
- rollouts
verbs:
- get
- list
- watch
{{- if .agentInjector.enabled }}
- patch
{{- end }}
{{- end }}
- apiGroups:
- "events.k8s.io"
resources:
- events
verbs:
- get
- watch
- apiGroups:
- "networking.k8s.io"
resources:
- servicecidrs
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $roleName }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $roleName }}
subjects:
- kind: ServiceAccount
name: traffic-manager
namespace: {{ include "traffic-manager.namespace" $ }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,216 @@
{{- if .Values.managerRbac.create }}
{{- /*
This file contains the various namespace-scoped roles and bindings that the traffic-manager needs.
It is likely to grow over time as more functionality moves from the clients'
domain into the traffic-manager. The upside is that this requires fewer
permissions in clientRbac.yaml.
*/}}
{{- $managerNamespace := include "traffic-manager.namespace" $}}
{{- $namespaces := fromJsonArray (include "traffic-manager.namespaces" $)}}
{{- if $namespaces }}
{{- $interceptEnabled := .Values.agentInjector.enabled}}
{{- $argoRolloutsEnabled := .Values.workloads.argoRollouts.enabled}}
{{- $allNamespaces := uniq (append $namespaces $managerNamespace)}}
{{- range $allNamespaces }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: traffic-manager
namespace: {{ . }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- update {{/* Only needed for upgrade of older versions */}}
- apiGroups:
- ""
resources:
- services
- pods
verbs:
- list
- get
- watch
{{- if $interceptEnabled }}
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
{{- end }}
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
resourceNames:
{{- if eq . $managerNamespace }}
- {{ include "traffic-manager.name" $ }}
{{- end }}
- apiGroups:
- "apps"
resources:
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
{{- if $interceptEnabled }}
- patch
{{- end }}
{{- if $argoRolloutsEnabled }}
- apiGroups:
- "argoproj.io"
resources:
- rollouts
verbs:
- get
- list
- watch
{{- if $interceptEnabled }}
- patch
{{- end }}
{{- end }}
- apiGroups:
- "events.k8s.io"
resources:
- events
verbs:
- get
- watch
{{- if eq . $managerNamespace }}
{{- /* Must be able to get the manager namespace in order to get the install-id */}}
- apiGroups:
- ""
resources:
- namespaces
resourceNames:
- {{ . }}
verbs:
- get
{{- if and (eq (int $.Capabilities.KubeVersion.Major) 1) (lt (int $.Capabilities.KubeVersion.Minor) 33) }}
{{- /*
Must be able to make an unsuccessful attempt to create a dummy service in order to receive
the error message containing the correct service CIDR.
*/}}
- apiGroups:
- ""
resources:
- services
verbs:
- create
{{- end }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: traffic-manager
namespace: {{ . }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: traffic-manager
subjects:
- kind: ServiceAccount
name: traffic-manager
namespace: {{ $managerNamespace }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traffic-manager-cluster-wide-{{ $managerNamespace }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups:
- "networking.k8s.io"
resources:
- servicecidrs
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traffic-manager-cluster-wide-{{ $managerNamespace }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traffic-manager-cluster-wide-{{ $managerNamespace }}
subjects:
- kind: ServiceAccount
name: traffic-manager
namespace: {{ $managerNamespace }}
{{- else }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: {{ $managerNamespace }}
name: traffic-manager
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- create
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
resourceNames:
- {{ include "traffic-manager.name" $ }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: traffic-manager
namespace: {{ $managerNamespace }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: traffic-manager
subjects:
- kind: ServiceAccount
name: traffic-manager
namespace: {{ $managerNamespace }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,11 @@
{{- if .Values.managerRbac.create }}
{{- /* This file contains the serviceAccount used for the traffic-manager deployment. */}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: traffic-manager
namespace: {{ include "traffic-manager.namespace" $ }}
labels:
{{- include "telepresence.labels" $ | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,34 @@
{{- if and (not (eq .Values.agentInjector.certificate.accessMethod "mount")) .Values.agentInjector.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: {{ include "traffic-manager.namespace" $ }}
name: agent-injector-webhook-secret
labels: {{- include "telepresence.labels" $ | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- secrets
resourceNames: [ {{ .Values.agentInjector.secret.name }} ]
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: agent-injector-webhook-secret
namespace: {{ include "traffic-manager.namespace" $ }}
labels: {{- include "telepresence.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: agent-injector-webhook-secret
subjects:
- kind: ServiceAccount
name: traffic-manager
namespace: {{ include "traffic-manager.namespace" $ }}
{{- end }}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,186 @@
################################################################################
## Deployment Configuration
################################################################################
# The Traffic Manager only supports running with one replica at the moment.
# Support for configuring replicaCount will be added in a future version of Telepresence.
replicaCount: 1
# The Telepresence client will try to ensure that the Traffic Manager image is
# up to date and comes from the right registry. If you change the values below,
# ensure that the tag matches the client version and that the
# TELEPRESENCE_REGISTRY environment variable is equal to image.registry.
#
# The client will default to ghcr.io/telepresenceio/tel2:{{CLIENT_VERSION}}
image:
registry: ghcr.io/telepresenceio
name: tel2
pullPolicy: IfNotPresent
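# A minimal override sketch (the registry value is illustrative; the tag key is an
# assumption based on the comment above and is normally left unset so the tag can
# follow the client version):
#   image:
#     registry: registry.example.com/telepresenceio
#     name: tel2
#     # tag: <same version as the Telepresence client>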
apiPort: 8081
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
################################################################################
## Traffic Manager Service Configuration
################################################################################
service:
type: ClusterIP
################################################################################
## Traffic Manager Configuration
################################################################################
# The log level of the Traffic Manager.
logLevel: info
# GRPC configuration for the Traffic Manager.
# This is identical to the grpc configuration for local clients.
# See https://www.telepresence.io/docs/latest/reference/config/#grpc for more info
grpc:
# Max time that the traffic-manager or traffic-agent will keep an idle client connection alive
connectionTTL: 24h
# maxReceiveSize is a quantity that configures the maximum message size that the traffic
# manager will service.
maxReceiveSize: 4Mi
# podCIDRStrategy controls what strategy the traffic-manager will use for finding out what
# CIDRs the cluster is using for its pods. Valid values are:
#
# nodePodCIDRs extracts CIDRs from the podCIDR and podCIDRs fields of the Node spec.
# coverPodIPs extracts IPs from the podIP and podIPs fields of the Pod status and computes the CIDRs needed to cover those IPs.
# environment uses the CIDRs listed in the space-separated POD_CIDRS environment variable verbatim.
# auto first tries nodePodCIDRs and, if that fails, falls back to coverPodIPs.
#
# Default: auto
podCIDRStrategy: auto
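# A minimal sketch of pinning the CIDRs by hand with the environment strategy
# (the podCIDRs key is an assumption inferred from the POD_CIDRS handling in the
# deployment template; the CIDR values are illustrative):
#   podCIDRStrategy: environment
#   podCIDRs:
#     - 10.244.0.0/16
#     - 10.96.0.0/12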
# maxNamespaceSpecificWatchers configures the threshold for when the traffic-manager switches from using one set of
# watchers for each managed namespace to using cluster-wide watchers. This threshold only applies when using a
# namespaceSelector, and only when the traffic-manager is permitted to list the cluster's namespaces.
maxNamespaceSpecificWatchers: 10
managerRbac:
# Default: true
create: true
timeouts:
# The duration the traffic manager should wait for an agent to arrive (i.e., to be registered in the traffic manager's state)
# Default: 30s
agentArrival: 30s
################################################################################
## Agent Injector Configuration
################################################################################
agentInjector:
enabled: true
name: agent-injector
secret:
name: mutator-webhook-tls
certificate:
# The method used by the agent-injector to access the generated secret.
# Possible options: watch or mount
#
# Default: watch
accessMethod: watch
# The method used to generate the TLS certificate for the agent-injector.
#
# Possible options: helm, supplied, or certmanager.
#
# If set to `supplied`, ensure your Secret is in the same namespace as the traffic-manager,
# and that `.agentInjector.secret.name` is set to its name.
# See the Secret in `agentInjectorWebhook.yaml` for the expected structure of the data.
# NOTE: If the Secret values update, the helm chart MUST be re-applied to ensure the
# MutatingWebhookConfiguration uses the new values.
#
# Default: helm
method: helm
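# A minimal sketch of switching to a user-supplied certificate (the Secret key names
# below are an assumption; agentInjectorWebhook.yaml remains the authoritative
# reference for the expected data):
#   agentInjector:
#     certificate:
#       method: supplied
#     secret:
#       name: mutator-webhook-tls
# with the Secret created beforehand, for example:
#   kubectl -n <traffic-manager namespace> create secret generic mutator-webhook-tls \
#     --from-file=ca.crt --from-file=tls.crt --from-file=tls.key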
# The certmanager configuration block
#
certmanager:
commonName: agent-injector
duration: 2160h0m0s
issuerRef:
name: telepresence
kind: Issuer
injectPolicy: OnDemand
webhook:
name: agent-injector-webhook
admissionReviewVersions: ["v1"]
servicePath: /traffic-agent
port: 443
failurePolicy: Ignore
reinvocationPolicy: IfNeeded
sideEffects: None
timeoutSeconds: 5
################################################################################
## Telepresence traffic-agent configuration
################################################################################
agent:
appProtocolStrategy: http2Probe
port: 9900
mountPolicies:
"/tmp": Local
image:
pullPolicy: IfNotPresent
initContainer:
enabled: true
################################################################################
## Telepresence API Server Configuration
################################################################################
telepresenceAPI: {}
# The port on the agent's localhost where the API service can be found
# Default: 0
# port: 0
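# A minimal sketch of enabling it (the port number is illustrative):
#   telepresenceAPI:
#     port: 9980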
################################################################################
## Prometheus Server Configuration
################################################################################
prometheus: {}
# Set this port number to enable a Prometheus metrics HTTP server for the
# traffic manager.
# Default: 0
# port: 0
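# A minimal sketch of enabling the metrics endpoint (the port number is illustrative):
#   prometheus:
#     port: 19102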
# Values specific to the Helm chart hooks for managing upgrades and deletion.
hooks:
busybox:
registry: docker.io
image: busybox
tag: latest
imagePullSecrets: []
curl:
registry: docker.io
image: "curlimages/curl"
tag: 8.1.1
imagePullSecrets: []
pullPolicy: IfNotPresent
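# A minimal sketch of pulling the hook images from a private mirror
# (the registry value is illustrative):
#   hooks:
#     busybox:
#       registry: registry.example.com/mirror
#     curl:
#       registry: registry.example.com/mirror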
client:
dns:
# Tell the client's DNS resolver never to send names with these suffixes to the cluster-side resolver
excludeSuffixes: [".com", ".io", ".net", ".org", ".ru"]
# Controls which workload kinds are recognized by Telepresence
workloads:
deployments:
enabled: true
replicaSets:
enabled: true
statefulSets:
enabled: true
argoRollouts:
enabled: false
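# For example, to let the traffic-manager list, watch and (when the agent-injector
# is enabled) patch Argo Rollouts, as wired up in the RBAC templates above:
#   workloads:
#     argoRollouts:
#       enabled: true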