diff --git a/blog/2024-12-10-telepresence-2.21.md b/blog/2024-12-10-telepresence-2.21.md index 3e5a8852..24d7a2a3 100644 --- a/blog/2024-12-10-telepresence-2.21.md +++ b/blog/2024-12-10-telepresence-2.21.md @@ -2,17 +2,11 @@ title: Telepresence 2.21 description: What's new in Telepresence 2.21. slug: telepresence-2.21 -authors: - - name: Thomas Hallgren - title: Maintainer of Telepresence OSS - url: https://github.com/thallgren - image_url: https://github.com/thallgren.png - socials: - linkedin: https://www.linkedin.com/in/thallgren/ +authors: thallgren --- -Telepresence 2.21.0 has been released, and here is a walkthrough of its many new features, such as automatic VPN -conflict avoidance, the new `telepresence ingest` command, and the improved docker support provided by commands like +Telepresence 2.21.0 has been released, and here is a walkthrough of its many new features, such as automatic VPN +conflict avoidance, the new `telepresence ingest` command, and the improved docker support provided by commands like `telepresence curl` and `telepresence docker-run`. diff --git a/blog/2025-03-14-telepresence-2.22.md b/blog/2025-03-14-telepresence-2.22.md new file mode 100644 index 00000000..4d980126 --- /dev/null +++ b/blog/2025-03-14-telepresence-2.22.md @@ -0,0 +1,109 @@ +--- +title: Telepresence 2.22 +description: What's new in Telepresence 2.22. +slug: telepresence-2.22 +authors: thallgren +--- + +Telepresence 2.22 introduces several compelling features, notably the telepresence replace command, a JSON schema for Helm chart values, greater control over Traffic Manager's namespace scope, and improved customization of remote mount behavior. + + + +## Telepresence Replace: Container-Level Substitution + +Telepresence introduces `telepresence replace`, a new command that offers a distinct method for interacting with containers, complementing `telepresence intercept` and `telepresence ingest`. 
+ +### Distinguishing Features of `replace` + +While superficially similar to `intercept`, `replace` operates with fundamental differences: + +1. **Container-Wide Scope:** `replace` substitutes an entire container, redirecting all its traffic, whereas `intercept` focuses on specific services or service/container ports. +2. **Direct Container Port Mapping:** The `--port` flag in `replace` directly maps to container ports. +3. **Optional Port Interception:** Unlike `intercept`, `replace` can function without intercepting any specific ports. +4. **Complete Container Replacement:** During a `replace`, the original container within the cluster is deactivated and fully substituted. + +### Rationale: Addressing Container-Level Substitution Needs + +The existing `telepresence intercept --replace` flag, which targets specific ports, prompted the development of the dedicated `replace` command. + +`intercept` inherently focuses on port-level traffic redirection. However, scenarios arise where container-level substitution is required without specific port interception. For instance, a message-queue consumer container might not expose any ports. Introducing a `--no-ports` flag to `intercept --replace` would create a logical contradiction, as "intercept" implies port-based traffic redirection. + +Therefore, `replace` was introduced to provide a clear and consistent mechanism for container-level substitution. + +### Deprecation of the `--replace` flag +The `telepresence intercept --replace` flag is now deprecated. While it remains functional, a warning will be issued, recommending the use of `telepresence replace` instead. + +### Generic term for Ingest, Intercept, and Replace + +Client engagement of a container is defined as the act of ingesting, intercepting, or replacing that container. Consequently, the term 'engage' will appear frequently throughout the documentation. 
+ +## Traffic Manager Namespaces + +The namespaces that the Telepresence Traffic Manager cares about can be declared in the Helm chart, and multiple Traffic Managers can be installed that deal with their own set of namespaces as long as there's no overlap. This has been the case before this release. Two things are new though: + +1. **Namespaces are consistently declared:** Prior to this release, the namespace declaration was scattered into several Helm chart values, such as `manager.Rbac.namespaces`, `client.Rbac.namespaces`, and `agentInjector.webhook.namespaceSelector`. The definition is now unified to the mutual exclusive top-level Helm chart values `namespaces` and `namespaceSelector`. +2. **The conflict detection is all-encompassing**: The old way of declaring namespaces implied that the Traffic Manager was either "cluster wide" (no declaration existed) or "namespaced". A conflict would be detected between two "namespaced" installation, but not between a "namespaced" and a "cluster wide" installation. This is no longer the case. All conflicting namespace selectors will yield errors and Helm will refuse to install them. + +### Dynamic or Static Selector + +A namespace selector can be dynamic or static. Telepresence will consider a selector that just targets the namespace name as a _static_ selector. Selectors using other labels, or negation of labels (using operator `NotIn`) are considered _dynamic_. The Difference is that the former isn't sensitive to changes (except, of course, the removal of a namespace) whereas the latter will take effect immediately if the set of matching namespaces changes. 
+ +Sample _static_ selector, selecting the namespaces "alpha" and "beta": +```yaml +namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - alpha + - beta +``` + +This type of selector can also be declared using the short form `namespaces`: +```yaml +namespaces: +- alpha +- beta +``` + +Sample _dynamic_ selector, selecting namespaces where the label "example.com/managed" is "true" and "example.com/environment" is "dev" or "qa": + +```yaml +namespaceSelector: + matchLabels: + example.com/managed: "true" + matchExpressions: + - key: example.com/environment + operator: In + values: + - dev + - qa +``` + +## Volume Mount Policies + +This feature gives the user control over what volumes that the traffic-agent will share with the client, how those volumes are shared (read-write or read-only), and also give the client a hint about volumes that are not shared but will be expected by the locally running container. + +Mount policies can be configured globally for a Traffic Manager using Helm chart values, or individually for a workload using template annotations. + +A volume policy can be `Ignore`, `Local`, `Remote`, or `RemoteReadOnly`. See the [Control Volume Sharing](/docs/reference/cluster-config#control-volume-sharing) section in the reference documentation for detailed information on the Helm chart value and annotation. + +## JSON-Schema for the Helm Chart Values + +It is no longer possible to enter invalid, misspelled, or otherwise non-existent configuration parameters when doing +a `helm install|upgrade` or `telepresence helm install|upgrade`, because all values are now guarded using a JSON-schema. + +A new `telepresence helm lint` command was also added, which corresponds to [helm lint](https://helm.sh/docs/helm/helm_lint/), but uses the Helm chart embedded in the `telepresence` binary. + +## Other Notable Changes + +### Annotation Prefix Change +All annotations now use the prefix `telepresence.io/`. 
This prefix was already used by labels added by Telepresence. The former annotation prefix `telepresence.getambassador.io/` will continue to work, but a warning will be printed in the logs. + +### Configmap `telepresence-agents` is no longer used +The Traffic Agent configurations stored in the `telepresence-agents` configmap are now instead added as pod-annotations. The configmap is no longer used, and can be safely removed. + +### Rollbacks Triggered Using Pod Eviction +When the configuration for a pod changes as the result of a client engaging with the pod, the pod will now be evicted in order to trigger the mutating webhook. This vouches for simpler logic and faster response times when engaging and ending an engagement. In earlier releases, Telepresence would instead patch the workload. The workload might still be triggered if the eviction fails due to the pod's disruption policy. + +Please check out the [Release Notes](/docs/release-notes) for a full list of all new features, changes, and bug-fixes. 
\ No newline at end of file diff --git a/blog/authors.yml b/blog/authors.yml new file mode 100644 index 00000000..27b26713 --- /dev/null +++ b/blog/authors.yml @@ -0,0 +1,7 @@ +thallgren: + name: Thomas Hallgren + title: Maintainer of Telepresence OSS + url: https://github.com/thallgren + image_url: https://github.com/thallgren.png + socials: + linkedin: https://www.linkedin.com/in/thallgren/ \ No newline at end of file diff --git a/docusaurus.config.ts b/docusaurus.config.ts index cfdbc552..796963d9 100644 --- a/docusaurus.config.ts +++ b/docusaurus.config.ts @@ -78,6 +78,13 @@ const config: Config = { const linkToItem = ((linkItem: LinkItem) => { if (linkItem.link) { idSet.delete(linkItem.link); + if(linkItem.link.startsWith("https://")) { + return { + type: 'link', + label: linkItem.title, + href: linkItem.link + } + } return { type: 'doc', label: linkItem.title, diff --git a/versioned_docs/version-2.21/reference/rbac.md b/versioned_docs/version-2.21/reference/rbac.md index 0eba0721..810bfcf0 100644 --- a/versioned_docs/version-2.21/reference/rbac.md +++ b/versioned_docs/version-2.21/reference/rbac.md @@ -266,4 +266,4 @@ roleRef: apiGroup: rbac.authorization.k8s.io ``` -The user will also need the [Traffic Manager connect permission](#traffic-manager-connect-permission) described above. +The user will also need the [Traffic Manager connect permission](#cluster-wide-installation) described above. diff --git a/versioned_docs/version-2.22/CONTRIBUTING.md b/versioned_docs/version-2.22/CONTRIBUTING.md new file mode 100644 index 00000000..fdbcee10 --- /dev/null +++ b/versioned_docs/version-2.22/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Telepresence Documentation + +This folder contains the Telepresence documentation in a format suitable for a versioned folder in the +telepresenceio/telepresence.io repository. The folder will show up in that repository when a new minor revision +tag is created here. 
+ +Assuming that a 2.20.0 release is pending, and that a release/v2.20.0 branch has been created, then: +```console +$ export TELEPRESENCE_VERSION=v2.20.0 +$ make prepare-release +$ git push origin {,rpc/}v2.20.0 release/v2.20.0 +``` + +will result in a `docs/v2.20` folder with this folder's contents in the telepresenceio/telepresence.io repository. + +Subsequent bugfix tags for the same minor tag, i.e.: +```console +$ export TELEPRESENCE_VERSION=v2.20.1 +$ make prepare-release +$ git push origin {,rpc/}v2.20.1 release/v2.20.1 +``` +will not result in a new folder when it is pushed, but it will update the content of the `docs/v2.20` folder to +reflect this folder's content for that tag. diff --git a/versioned_docs/version-2.22/README.md b/versioned_docs/version-2.22/README.md new file mode 100644 index 00000000..b18160b8 --- /dev/null +++ b/versioned_docs/version-2.22/README.md @@ -0,0 +1,48 @@ +--- +description: Main menu when using plain markdown. Excluded when generating the website +--- +# Telepresence Documentation +raw markdown version, more bells and whistles at [telepresence.io](https://telepresence.io) + +- [Quick start](quick-start.md) +- Install Telepresence + - [Install Client](install/client.md) + - [Upgrade Client](install/upgrade.md) + - [Install Traffic Manager](install/manager.md) + - [Cloud Provider Prerequisites](install/cloud.md) +- Core concepts + - [The developer experience and the inner dev loop](concepts/devloop.md) + - [Making the remote local: Faster feedback, collaboration and debugging](concepts/faster.md) + - [Intercepts](concepts/intercepts.md) +- How do I... 
+ - [Code and debug an application locally](howtos/engage.md) + - [Use Telepresence with Docker](howtos/docker.md) + - [Work with large clusters](howtos/large-clusters.md) + - [Host a cluster in Docker or a VM](howtos/cluster-in-vm.md) + - [Use Telepresence with Azure (Microsoft Learn)](https://learn.microsoft.com/en-us/azure/aks/use-telepresence-aks.md) +- Technical reference + - [Architecture](reference/architecture.md) + - [Client reference](reference/client.md) + - [Laptop-side configuration](reference/config.md) + - [Cluster-side configuration](reference/cluster-config.md) + - [Using Docker for engagements](reference/docker-run.md) + - [Running Telepresence in a Docker container](reference/inside-container.md) + - [Environment variables](reference/environment.md) + - Engagements + - [Configure intercept using CLI](reference/engagements/cli.md) + - [Traffic Agent Sidecar](reference/engagements/sidecar.md) + - [Target a specific container](reference/engagements/container.md) + - [Volume mounts](reference/volume.md) + - [DNS resolution](reference/dns.md) + - [RBAC](reference/rbac.md) + - [Telepresence and VPNs](reference/vpn.md) + - [Networking through Virtual Network Interface](reference/tun-device.md) + - [Connection Routing](reference/routing.md) + - [Monitoring](reference/monitoring.md) +- Comparisons + - [Telepresence vs mirrord](compare/mirrord.md) +- [FAQs](faqs.md) +- [Troubleshooting](troubleshooting.md) +- [Community](community.md) +- [Release Notes](release-notes.md) +- [Licenses](licenses.md) diff --git a/versioned_docs/version-2.22/community.md b/versioned_docs/version-2.22/community.md new file mode 100644 index 00000000..b264c917 --- /dev/null +++ b/versioned_docs/version-2.22/community.md @@ -0,0 +1,13 @@ +--- +title: Community +hide_table_of_contents: true +--- + +# Community + +## Contributor's guide +Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/CONTRIBUTING.md) +on GitHub to learn how 
you can help make Telepresence better. + +## Meetings +Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/versioned_docs/version-2.22/compare/mirrord.md b/versioned_docs/version-2.22/compare/mirrord.md new file mode 100644 index 00000000..9985099a --- /dev/null +++ b/versioned_docs/version-2.22/compare/mirrord.md @@ -0,0 +1,76 @@ +--- +title: "Telepresence vs mirrord" +hide_table_of_contents: true +--- + +## Telepresence + +Telepresence is a very feature rich tool, designed to handle a large majority of use-cases. You can use it as a cluster VPN only, or use one of its three different ways (replace, intercept, or ingest) to engage with the cluster's resources. + +Telepresence is intended to be installed in the cluster by an administrator and then let clients connect with a very limited set of permissions. This model is generally required by larger companies. + +The client can be either completely contained in Docker or run directly on the workstation. The latter will require the creation of a virtual network device, and hence admin access. + +## mirrord + +Mirrord was designed with simplicity in mind. You install the CLI tool, and that's it. It will do the rest automatically under the hood. + +Mirrord solves the same problem as Telepresence, but in a different way. Instead of providing a proper network +device and remotely mounted filesystems, mirrord will link the client application with a `mirrord-layer` shared library. This library will intercept accesses to the network, file system, and environment variables, and reroute them to a corresponding process in the cluster (the `mirrord-agent`) which then interacts with the targeted pod. + +### Limitations + +While mirrotd is simple to set up, the chosen approach has several limitations, both on the client and the cluster side. 
+ +### Limitations when using dynamic loading: + +1. It will only work on Linux and macOS platforms. There's no native support on Windows. +2. It will only work with dynamically linked executables. +3. It cannot be used with docker unless you rebuild the container and inject the `mirrord-layer` into it. +4. `DYLD_INSERT_LIBRARIES` causes various problems on macOS (SIP prevents it from being used), especially on silicon-based machines where mirrord will require Rosetta. +5. Should Apple decide to protect their intel-based platform the same way as the silicon-based one in a future release, then mirrord will likely be problematic to use on that platform. + +### Cluster Permissions + +Mirrord does not require a sidecar. Instead they install a the `mirror-agent` into the namespace of the pod that it impersonates. That agent requires several permissions that a cluster admin might consider a security risk: + +* `CAP_NET_ADMIN` and `CAP_NET_RAW` - required for modifying routing tables +* `CAP_SYS_PTRACE` - required for reading target pod environment +* `CAP_SYS_ADMIN` - required for joining target pod network namespace + +Unless using "mirrord for Teams" (proprietary), all users must have permissions to create the job running the `mirror-agent` in the cluster. + +## Comparison Telepresence vs mirrord + +This comparison chart applies to the Open Source editions of both products. 
+ +| Feature | Telepresence | mirrord | +|----------------------------------------------------------------------------|--------------|---------| +| Run or Debug your cluster containers locally | ✅ | ✅ | +| Does not need administrative permission on workstation | ✅ [^1] | ✅ | +| Can be used with very large clusters | ✅ | ✅ | +| Works without interrupting the remote service | ✅ [^2] | ✅ | +| Doesn't require injection of a sidecar | ✅ [^3] | ✅ | +| Supports connecting to clusters over a corporate VPN | ✅ | ✅ | +| Can intercept traffic | ✅ | ✅ | +| Can ingest a container | ✅ | ❌ | +| Can replace a container | ✅ | ❌ | +| Can mirror traffic | ❌ | ✅ | +| Can act as a cluster VPN only | ✅ | ❌ | +| Will work with statically linked binaries | ✅ | ❌ | +| Runs natively on windows | ✅ | ❌ | +| Can intercept traffic to and from pod's localhost | ✅ | ❌ | +| Remotely mounted file system available from all applications | ✅ | ❌ | +| Cluster network available to all applications (including browser) | ✅ | ❌ | +| Can run the same docker container locally without rebuilding it | ✅ | ❌ | +| Provides remote mounts as volumes in docker | ✅ | ❌ | +| Does not require special capabilities such as CAP_SYS_ADMIN in the cluster | ✅ | ❌ | +| Centralized client configuration using Helm chart | ✅ | ❌ | +| Installed using a JSON-schema validated Helm chart | ✅ | ❌ | +| Client need no special RBAC permissions | ✅ | ❌ | + +[^1]: Telepresence will not require root access on the workstation when running in docker mode. + +[^2]: The remote service will only restart when a traffic-agent sidecar is installed. Pod disruption budgets or pre-installed agents can be used to avoid interruptions. + +[^3]: A traffic-agent is necessary when engaging with a pod. It is unnecessary when using Telepresence as a VPN. 
diff --git a/versioned_docs/version-2.22/concepts/devloop.md b/versioned_docs/version-2.22/concepts/devloop.md new file mode 100644 index 00000000..d97f27af --- /dev/null +++ b/versioned_docs/version-2.22/concepts/devloop.md @@ -0,0 +1,55 @@ +--- +title: The developer experience and the inner dev loop +hide_table_of_contents: true +--- + +# The developer experience and the inner dev loop + +## How is the developer experience changing? + +The developer experience is the workflow a developer uses to develop, test, deploy, and release software. + +Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. + +The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, controlled (canary) release, and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a GitOps workflow and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. + +Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. + +Engineers now must design and build distributed service-based applications _and_ also assume responsibility for the full development life cycle. The new developer experience means that developers can no longer rely on monolithic application developer best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner development loop. 
Now developers have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this adds development time to the equation. + +## What is the inner dev loop? + +The inner dev loop is the single developer workflow. A single developer should be able to set up and use an inner dev loop to code and test changes quickly. + +Even within the Kubernetes space, developers will find much of the inner dev loop familiar. That is, code can still be written locally at a level that a developer controls and committed to version control. + +In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code — they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. + +![traditional inner dev loop](../images/trad-inner-dev-loop.png#devloop) + +## In search of lost time: How does containerization change the inner dev loop? + +The inner dev loop is where writing and testing code happens, and time is critical for maximum developer productivity and getting features in front of end users. The faster the feedback loop, the faster developers can refactor and test again. + +Changes to the inner dev loop process, i.e., containerization, threaten to slow this development workflow down. Coding stays the same in the new inner dev loop, but code has to be containerized. 
The _containerized_ inner dev loop requires a number of new steps: + +* packaging code in containers +* writing a manifest to specify how Kubernetes should run the application (e.g., YAML-based configuration information, such as how much memory should be given to a container) +* pushing the container to the registry +* deploying containers in Kubernetes + +Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. + + +![container inner dev loop](../images/container-inner-dev-loop.png#devloop) + +## Tackling the slow inner dev loop + +A slow inner dev loop can negatively impact frontend and backend teams, delaying work on individual and team levels and slowing releases into production overall. + +For example: + +* Frontend developers have to wait for previews of backend changes on a shared dev/staging environment (for example, until CI/CD deploys a new version) and/or rely on mocks/stubs/virtual services when coding their application locally. These changes are only verifiable by going through the CI/CD process to build and deploy within a target environment. +* Backend developers have to wait for CI/CD to build and deploy their app to a target environment to verify that their code works correctly with cluster or cloud-based dependencies as well as to share their work to get feedback. + +New technologies and tools can facilitate cloud-native, containerized development. And in the case of a sluggish inner dev loop, developers can accelerate productivity with tools that help speed the loop up again. 
diff --git a/versioned_docs/version-2.22/concepts/faster.md b/versioned_docs/version-2.22/concepts/faster.md new file mode 100644 index 00000000..f8afb503 --- /dev/null +++ b/versioned_docs/version-2.22/concepts/faster.md @@ -0,0 +1,29 @@ +--- +title: "Making the remote local: Faster feedback, collaboration and debugging" +hide_table_of_contents: true +--- + +--- +# Making the remote local: Faster feedback, collaboration and debugging + +With the goal of achieving fast, efficient development, developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. + +## How should I set up a Kubernetes development environment? + +Setting up a development environment for Kubernetes can be much more complex than the setup for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. + +While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. + +A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../quick-start.md) to facilitate fast feedback, debugging and collaboration. + +## What is Telepresence? + +Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../quick-start.md). 
Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. + +## How can I get fast, efficient local development? + +The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. + +A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code and local debugging in production. + +Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP/UDP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. 
diff --git a/versioned_docs/version-2.22/concepts/intercepts.md b/versioned_docs/version-2.22/concepts/intercepts.md new file mode 100644 index 00000000..e3496699 --- /dev/null +++ b/versioned_docs/version-2.22/concepts/intercepts.md @@ -0,0 +1,64 @@ +--- +title: "Intercepts" +description: "Short demonstration of global intercepts" +hide_table_of_contents: true +--- + +import Admonition from '@theme/Admonition'; +import Paper from '@mui/material/Paper'; +import Tab from '@mui/material/Tab'; +import TabContext from '@mui/lab/TabContext'; +import TabList from '@mui/lab/TabList'; +import TabPanel from '@mui/lab/TabPanel'; +import TabsContainer from '@site/src/components/TabsContainer'; +import Animation from '@site/src/components/InterceptAnimation'; + + + + +# No intercept + + + + +This is the normal operation of your cluster without Telepresence. + + + + + + + +# Intercept + + + +**Intercepts** replace the Kubernetes "Orders" service with the +Orders service running on your laptop. The users see no change, but +with all the traffic coming to your laptop, you can observe and debug +with all your dev tools. + +### Creating and using intercepts + + 1. Creating the intercept: Intercept your service from your CLI: + + ```shell + telepresence intercept SERVICENAME + ``` + + + + Make sure your current kubectl context points to the target + cluster. If your service is running in a different namespace than + your current active context, use or change the `--namespace` flag. + + + + 2. Using the intercept: Send requests to your service: + + All requests will be sent to the version of your service that is + running in the local development environment. 
+ + + + diff --git a/versioned_docs/version-2.22/doc-links.yml b/versioned_docs/version-2.22/doc-links.yml new file mode 100644 index 00000000..41eaaf76 --- /dev/null +++ b/versioned_docs/version-2.22/doc-links.yml @@ -0,0 +1,84 @@ +- title: Quick start + link: quick-start +- title: Install Telepresence + items: + - title: Install Client + link: install/client + - title: Upgrade Client + link: install/upgrade + - title: Install Traffic Manager + link: install/manager + - title: Cloud Provider Prerequisites + link: install/cloud +- title: Core concepts + items: + - title: The developer experience and the inner dev loop + link: concepts/devloop + - title: "Making the remote local: Faster feedback, collaboration and debugging" + link: concepts/faster + - title: Intercepts + link: concepts/intercepts +- title: How do I... + items: + - title: Code and debug an application locally + link: howtos/engage + - title: Use Telepresence with Docker + link: howtos/docker + - title: Work with large clusters + link: howtos/large-clusters + - title: Host a cluster in Docker or a VM + link: howtos/cluster-in-vm + - title: Use Telepresence with Azure (Microsoft Learn) + link: https://learn.microsoft.com/en-us/azure/aks/use-telepresence-aks +- title: Technical reference + items: + - title: Architecture + link: reference/architecture + - title: Client reference + link: reference/client + - title: Laptop-side configuration + link: reference/config + - title: Cluster-side configuration + link: reference/cluster-config + - title: Using Docker for engagements + link: reference/docker-run + - title: Running Telepresence in a Docker container + link: reference/inside-container + - title: Environment variables + link: reference/environment + - title: Engagements + items: + - title: Configure intercept using CLI + link: reference/engagements/cli + - title: Traffic Agent Sidecar + link: reference/engagements/sidecar + - title: Target a specific container + link: reference/engagements/container 
+ - title: Volume mounts + link: reference/volume + - title: DNS resolution + link: reference/dns + - title: RBAC + link: reference/rbac + - title: Telepresence and VPNs + link: reference/vpn + - title: Networking through Virtual Network Interface + link: reference/tun-device + - title: Connection Routing + link: reference/routing + - title: Monitoring + link: reference/monitoring +- title: Comparisons + items: + - title: Telepresence vs mirrord + link: compare/mirrord +- title: FAQs + link: faqs +- title: Troubleshooting + link: troubleshooting +- title: Community + link: community +- title: Release Notes + link: release-notes +- title: Licenses + link: licenses \ No newline at end of file diff --git a/versioned_docs/version-2.22/faqs.md b/versioned_docs/version-2.22/faqs.md new file mode 100644 index 00000000..f35901f6 --- /dev/null +++ b/versioned_docs/version-2.22/faqs.md @@ -0,0 +1,101 @@ +--- +title: FAQs +description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." +hide_table_of_contents: true +--- + +# FAQs + +### Why Telepresence + +Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](concepts/devloop.md) from previous software projects is often a distant memory for cloud developers. + +Telepresence enables you to connect your local development machine seamlessly to the cluster via a two-way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster — which in the cloud means you have access to effectively unlimited resources. 
+ +Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. + +Telepresence provides three different ways for you to code, debug, and test your service locally using your favourite local IDE and in-process debugger. + +First off, you can "replace" the service with your own local version. This means even though you run your service locally, you can see how it interacts with the rest of the services in the cluster. It's like swapping out a piece of a puzzle and seeing how the whole picture changes. Your local process will have access to the same network, environment, and volumes as the service that it replaces. + +You can also "intercept" any requests made to a service. This is similar to replacing the service, but the remote service will keep running, perform background tasks, and handle traffic that isn't intercepted. + +Finally, you can "ingest" a service. Again, similar to a "replace", but nothing changes in the cluster during an "ingest", and no traffic is routed to the workstation. + +#### What operating systems does Telepresence work on? + +Telepresence currently works natively on macOS, Linux, and Windows. + +#### What architecture does Telepresence work on? + +All Telepresence binaries are released for both AMD (Intel) and ARM (Apple Silicon) chips. + +#### What protocols can be intercepted by Telepresence? + +Both TCP and UDP are supported. + +#### When using Telepresence to run a cluster service locally, are the Kubernetes cluster environment variables proxied on my local machine? + +Yes, you can either set the container's environment variables on your machine or write the variables to a file to use with Docker or another build process. You can also directly pass the environments to a handler that runs locally. Please see [the environment variable reference doc](reference/environment.md) for more information. 
+ +#### When using Telepresence to run a cluster service locally, can the associated container volume mounts also be mounted by my local machine? + +Yes, and when running Docker, they can be used as docker volumes. Please see [the volume mounts reference doc](reference/volume.md) for more information. + +#### When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name? + +Yes. After you have successfully connected to your cluster via `telepresence connect -n <namespace>` you will be able to access any service in the connected namespace in your cluster via their DNS name. + +This means you can curl endpoints directly e.g. `curl <service-name>:8080/mypath`. + +You can also access services in other namespaces using their namespaced qualified name, e.g. `curl <service-name>.<namespace>:8080/mypath`. + +In essence, Telepresence makes the DNS of the connected namespace available locally. This means that you can connect to all databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. + +#### When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name? + +You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. + +#### Will Telepresence be able to engage with workloads running on a private cluster or cluster running within a virtual private cloud (VPC)? + +Yes, but the cluster doesn't need to have a publicly accessible IP address. + +The cluster must also have access to an external registry to be able to download the traffic-manager and traffic-agent images that are deployed when connecting with Telepresence. + +#### Why does running Telepresence require sudo access for the local daemon unless it runs in a Docker container?
+ +The local daemon needs sudo to create a VIF (Virtual Network Interface) for outbound routing and DNS. Root access is needed to do that unless the daemon runs in a Docker container. + +#### What components get installed in the cluster when running Telepresence? + +A `traffic-manager` service is deployed in a namespace of your choice (default 'ambassador') within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. + +A Traffic Agent container is injected per pod that is being engaged. The injection happens the first time a `replace`, an `ingest`, or an `intercept` is made on a workload, unless you choose to control the injection using an annotation, in which case the injection happens when the `traffic-manager` is installed. + +#### How can I remove all the Telepresence components installed within my cluster? + +You can run the command `telepresence helm uninstall` to remove everything from the cluster, including the `traffic-manager`, and all the `traffic-agent` containers injected into each pod being engaged. + +You can also run the command `telepresence uninstall <workload name>` to remove the `traffic-agent` containers injected into each pod of that workload. + +Also run `telepresence quit -s` to stop all local daemons running. + +#### What language is Telepresence written in? + +All components of the Telepresence application and cluster components are written using Go. + +#### How does Telepresence connect and tunnel into the Kubernetes cluster? + +The connection between your laptop and cluster is established by using +the `kubectl port-forward` machinery (though without actually spawning +a separate program) to establish a TLS encrypted connection to Telepresence +Traffic Manager and Traffic Agents in the cluster, and running Telepresence's custom VPN +protocol over that connection. + +#### Is Telepresence OSS open source? + +Yes, it is!
You'll find both source code and documentation in the [Telepresence GitHub repository](https://github.com/telepresenceio/telepresence), licensed using the [Apache License Version 2.0](https://github.com/telepresenceio/telepresence?tab=License-1-ov-file#readme). + +#### How do I share my feedback on Telepresence? + +Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our #telepresence-oss channel at the [CNCF Slack](https://slack.cncf.io), and also report issues or create pull-requests on the GitHub repository. diff --git a/versioned_docs/version-2.22/howtos/cluster-in-vm.md b/versioned_docs/version-2.22/howtos/cluster-in-vm.md new file mode 100644 index 00000000..cd4ba131 --- /dev/null +++ b/versioned_docs/version-2.22/howtos/cluster-in-vm.md @@ -0,0 +1,198 @@ +--- +title: Host a cluster in Docker or a VM +description: Use Telepresence to engage with services in a cluster running in a hosted docker container or virtual + machine. +hide_table_of_contents: true +--- + +# Network considerations for locally hosted clusters + +## The problem +Telepresence creates a Virtual Network Interface ([VIF](../reference/tun-device.md)) that maps the cluster subnets to the host machine when it connects. If you're running Kubernetes locally (e.g., Docker Desktop, Kind, Minikube, k3s), you may encounter network problems because the devices in the host are also accessible from the cluster's nodes. + +### Example: +A k3s cluster runs in a headless VirtualBox machine that uses a "host-only" network. This network will allow both host-to-guest and guest-to-host connections. In other words, the cluster will have access to the host's network and, while Telepresence is connected, also to its VIF.
This means that from the cluster's perspective, there will now be more than one interface that maps the cluster's subnets; the ones already present in the cluster's nodes, and then the Telepresence VIF, mapping them again. + +Now, if a request arrives at Telepresence covered by a subnet mapped by the VIF, the request is routed to the cluster. If the cluster for some reason doesn't find a corresponding listener that can handle the request, it will eventually try the host network, and find the VIF. The VIF routes the request to the cluster and now the recursion is in motion. The final outcome of the request will likely be a timeout but since the recursion is very resource intensive (a large amount of very rapid connection requests), this will likely also affect other connections in a bad way. + +## Solution + +### Prevent recursion in the VIF +To prevent recursive connections within the VIF, set the client configuration property `routing.recursionBlockDuration` to a short timeout value. +A value of `1ms` is typically sufficient. This configuration will temporarily block new connections to a specific IP:PORT pair immediately after a +connection has been established, thereby preventing looped connections back into the VIF. The block remains in effect for the specified duration. + +### Create a bridge network +An alternative to using the `routing.recursionBlockDuration` can be to create a bridge network. It acts as a Link Layer (L2) device that forwards traffic between network segments. By creating a bridge network, you can bypass the host's network stack, and instead make the Kubernetes cluster connect directly to the same router as your host. + +To create a bridge network, you need to change the network settings of the guest running a cluster's node so that it connects directly to a physical network device on your host. The details on how to configure the bridge depend on what type of virtualization solution you're using.
+ +#### Vagrant + Virtualbox + k3s example +Here's a sample `Vagrantfile` that will spin up a server node and two agent nodes in three headless instances using a bridged network. It also adds the configuration needed for the cluster to host a docker repository (very handy in case you want to save bandwidth). The Kubernetes registry manifest must be applied using `kubectl apply -f registry.yaml` once the cluster is up and running. + +##### Vagrantfile +```ruby +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# bridge is the name of the host's default network device +$bridge = 'wlp5s0' + +# default_route should be the IP of the host's default route. +$default_route = '192.168.1.1' + +# nameserver must be the IP of an external DNS, such as 8.8.8.8 +$nameserver = '8.8.8.8' + +# server_name should also be added to the host's /etc/hosts file and point to the server_ip +# for easy access when pushing docker images +server_name = 'multi' + +# static IPs for the server and agents. Those IPs must be on the default router's subnet +server_ip = '192.168.1.110' +agents = { + 'agent1' => '192.168.1.111', + 'agent2' => '192.168.1.112', +} + +# Extra parameters in INSTALL_K3S_EXEC variable because of +# K3s picking up the wrong interface when starting server and agent +# https://github.com/alexellis/k3sup/issues/306 +server_script = <<-SHELL + sudo -i + apk add curl + export INSTALL_K3S_EXEC="--bind-address=#{server_ip} --node-external-ip=#{server_ip} --flannel-iface=eth1" + mkdir -p /etc/rancher/k3s + cat <<-'EOF' > /etc/rancher/k3s/registries.yaml +mirrors: + "multi:5000": + endpoint: + - "http://#{server_ip}:5000" +EOF + curl -sfL https://get.k3s.io | sh - + echo "Sleeping for 5 seconds to wait for k3s to start" + sleep 5 + cp /var/lib/rancher/k3s/server/token /vagrant_shared + cp /etc/rancher/k3s/k3s.yaml /vagrant_shared + cp /etc/rancher/k3s/registries.yaml /vagrant_shared + SHELL + +agent_script = <<-SHELL + sudo -i + apk add curl + export K3S_TOKEN_FILE=/vagrant_shared/token +
export K3S_URL=https://#{server_ip}:6443 + export INSTALL_K3S_EXEC="--flannel-iface=eth1" + mkdir -p /etc/rancher/k3s + cat <<-'EOF' > /etc/rancher/k3s/registries.yaml +mirrors: + "multi:5000": + endpoint: + - "http://#{server_ip}:5000" +EOF + curl -sfL https://get.k3s.io | sh - + SHELL + +def config_vm(name, ip, script, vm) + # The network_script has two objectives: + # 1. Ensure that the guest's default route is the bridged network (bypass the network of the host) + # 2. Ensure that the DNS points to an external DNS service, as opposed to the DNS of the host that + # the NAT network provides. + network_script = <<-SHELL + sudo -i + ip route delete default 2>&1 >/dev/null || true; ip route add default via #{$default_route} + cp /etc/resolv.conf /etc/resolv.conf.orig + sed 's/^nameserver.*/nameserver #{$nameserver}/' /etc/resolv.conf.orig > /etc/resolv.conf + SHELL + + vm.hostname = name + vm.network 'public_network', bridge: $bridge, ip: ip + vm.synced_folder './shared', '/vagrant_shared' + vm.provider 'virtualbox' do |vb| + vb.memory = '4096' + vb.cpus = '2' + end + vm.provision 'shell', inline: script + vm.provision 'shell', inline: network_script, run: 'always' +end + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/alpine314' + + config.vm.define 'server', primary: true do |server| + config_vm(server_name, server_ip, server_script, server.vm) + end + + agents.each do |agent_name, agent_ip| + config.vm.define agent_name do |agent| + config_vm(agent_name, agent_ip, agent_script, agent.vm) + end + end +end +``` + +The Kubernetes manifest to add the registry: + +##### registry.yaml +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-registry-v0 + namespace: kube-system + labels: + k8s-app: kube-registry + version: v0 +spec: + replicas: 1 + selector: + app: kube-registry + version: v0 + template: + metadata: + labels: + app: kube-registry + version: v0 + spec: + containers: + - name: registry + image: registry:2 + resources: 
+ limits: + cpu: 100m + memory: 200Mi + env: + - name: REGISTRY_HTTP_ADDR + value: :5000 + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: /var/lib/registry + volumeMounts: + - name: image-store + mountPath: /var/lib/registry + ports: + - containerPort: 5000 + name: registry + protocol: TCP + volumes: + - name: image-store + hostPath: + path: /var/lib/registry-storage +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-registry + namespace: kube-system + labels: + app: kube-registry + kubernetes.io/name: "KubeRegistry" +spec: + selector: + app: kube-registry + ports: + - name: registry + port: 5000 + targetPort: 5000 + protocol: TCP + type: LoadBalancer +``` diff --git a/versioned_docs/version-2.22/howtos/docker.md b/versioned_docs/version-2.22/howtos/docker.md new file mode 100644 index 00000000..c40d2aae --- /dev/null +++ b/versioned_docs/version-2.22/howtos/docker.md @@ -0,0 +1,101 @@ +--- +title: "Using Telepresence with Docker" +hide_table_of_contents: true +--- +# Telepresence with Docker + +## Why? + +It can be tedious to adopt Telepresence across your organization, since in its handiest form, it requires admin access, +and needs to get along with any exotic networking setup that your company may have. + +If Docker is already approved in your organization, this Golden path should be considered. + +## How? + +When using Telepresence in Docker mode, users can eliminate the need for admin access on their machines, address several networking challenges, and forego the need for third-party applications to enable volume mounts. + +You can simply add the docker flag to any Telepresence command, and it will start your daemon in a container. 
+This removes the need for root access, making Telepresence easier to adopt as an organization.
+
+Let's illustrate with a quick demo, assuming a default Kubernetes context named `default`, and a simple HTTP service:
+
+```console
+$ telepresence connect --docker
+Connected to context default, namespace default (https://kubernetes.docker.internal:6443)
+```
+
+This method limits the scope of the potential networking issues since everything stays inside Docker. The Telepresence daemon can be found under the name `tp-<connection name>-cn` when listing your containers.
+
+```console
+$ docker ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+540a3c12f45b ghcr.io/telepresenceio/telepresence:2.22.0 "telepresence connec…" 18 seconds ago Up 16 seconds 127.0.0.1:58802->58802/tcp tp-default-cn
+```
+
+Replace a container in the cluster and start a corresponding local container:
+
+```console
+$ telepresence replace echo-sc --docker-run -- ghcr.io/telepresenceio/echo-server:latest
+Using Deployment echo-sc
+ Container name: echo-sc
+ State : ACTIVE
+ Workload kind : Deployment
+ Port forwards : 127.0.0.1 -> 127.0.0.1
+ 8080 -> 8080 TCP
+Echo server listening on port 8080.
+```
+
+Using `--docker-run` starts the local container that acts as the handler, so that it uses the same network as the
+container that runs the telepresence daemon. It will also receive the same incoming traffic and have the remote volumes
+mounted in the same way as the remote container that it replaces.
+
+If you want to curl your remote service, you'll need to do that from a container that shares the daemon container's
+network. Telepresence provides a `curl` command that will do just that. 
+
+```console
+$ telepresence curl echo-sc
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+100 196 100 196 0 0 4232 0 --:--:-- --:--:-- --:--:-- 4260
+Request served by 540a3c12f45b
+
+Intercept id b0bd5e75-2618-4bef-ac4e-4c08c4b58ec7:echo-sc/echo-sc
+Intercepted container "echo-sc"
+HTTP/1.1 GET /
+
+Host: echo-sc
+User-Agent: curl/8.11.1
+Accept: */*
+```
+
+Similarly, if you want to start your container manually using `docker run`, you must ensure that it shares the
+daemon container's network. The most convenient way to do that is to use the `--docker-run` flag as explained above,
+but you can also start a container separately using `telepresence docker-run`.
+
+```console
+$ telepresence docker-run ghcr.io/telepresenceio/echo-server:latest
+Echo server listening on port 8080.
+```
+
+> [!TIP]
+> Use named connections
+> You can use the `--name` flag to name the connection if you want to connect to several namespaces simultaneously, e.g.
+
+```console
+$ telepresence connect --docker --name alpha --namespace alpha
+$ telepresence connect --docker --name beta --namespace beta
+```
+
+Now, with two connections active, you must pass the flag `--use <name>` to other commands, e.g.
+
+```console
+$ telepresence replace echo-easy --use alpha --docker-run -- ghcr.io/telepresenceio/echo-server:latest
+```
+
+## Key learnings
+
+* Using the Docker mode of telepresence **does not require root access**, and makes it **easier** to adopt across your organization.
+* It **limits the potential networking issues** you can encounter.
+* It **limits the potential mount issues** you can encounter.
+* It **enables simultaneous engagements in multiple namespaces**. 
diff --git a/versioned_docs/version-2.22/howtos/engage.md b/versioned_docs/version-2.22/howtos/engage.md new file mode 100644 index 00000000..1a766bba --- /dev/null +++ b/versioned_docs/version-2.22/howtos/engage.md @@ -0,0 +1,253 @@ +--- +title: Code and debug an application locally +description: Start using Telepresence in your own environment. Follow these steps to work locally with cluster applications. +hide_table_of_contents: true +--- + +# Code and debug an application locally + +## Local Development Methods + +Telepresence offers three powerful ways to develop your services locally: + +### Replace +* **How it Works:** + - Replaces an existing container within your Kubernetes cluster with a Traffic Agent. + - Reroutes traffic intended for the replaced container to your local workstation. + - Makes the remote environment of the replaced container available to the local workstation. + - Provides read-write access to the volumes mounted by replaced container. +* **Impact:** + - A Traffic Agent is injected into the pods of the targeted workload. + - The replaced container is removed from the pods of the targeted workload. + - The replaced container is restored when the replace operation ends. +* **Use-cases:** + - You're working with message queue consumers and must stop the remote container. + - You're working with remote containers configured without incoming traffic. + +### Intercept +* **How it Works:** + - Intercepts requests destined for a specific service port (or ports) and reroutes them to the local workstation. + - Makes the remote environment of the targeted container available to the local workstation. + - Provides read-write access to the volumes mounted by the targeted container. +* **Impact:** + - A Traffic Agent is injected into the pods of the targeted workload. + - Intercepted traffic is rerouted to the local workstation and will no longer reach the remote service. + - All containers keep on running. 
+* **Use-cases:**
+  - Your main focus is the service API rather than the cluster's pods and containers.
+  - You want your local service to only receive specific ingress traffic, while other traffic must be untouched.
+  - You want your remote container to continue processing other requests or background tasks.
+
+### Ingest
+* **How it Works:**
+  - Makes the remote environment of the ingested container available to the local workstation.
+  - Provides read-only access to the volumes mounted by the ingested container.
+* **Impact:**
+  - A Traffic Agent is injected into the pods of the targeted workload.
+  - No traffic is rerouted and all containers keep on running.
+* **Use-cases:**
+  - You want to keep the impact of your local development to a minimum.
+  - You don't need traffic to be routed from the cluster, and read-only access to the container's volumes is sufficient.
+
+## Prerequisites
+
+Before you begin, you need to have [Telepresence installed](../install/client.md). This document uses the Kubernetes command-line tool, [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+in several examples. OpenShift users can substitute [`oc` commands](https://docs.openshift.com/container-platform/4.1/cli_reference/developer-cli-commands.html) instead.
+
+This guide assumes you have an application represented by a Kubernetes deployment and service accessible publicly by an ingress controller,
+and that you can run a copy of that application on your laptop.
+
+## Replace your Container
+
+This approach offers the benefit of direct cluster connectivity from your workstation, simplifying debugging and
+modification of your application within its familiar environment. However, it requires root access to configure
+network telepresence, and remote mounts must be made relative to a specific mount point, which can add complexity.
+
+1. Connect to your cluster with `telepresence connect` and try to curl to the Kubernetes API server. 
A 401 or 403 response code is expected and indicates that the service could be reached:
+
+   ```console
+   $ curl -ik https://kubernetes.default
+   HTTP/1.1 401 Unauthorized
+   Cache-Control: no-cache, private
+   Content-Type: application/json
+   ...
+   ```
+
+   You now have access to your remote Kubernetes API server as if you were on the same network. You can now use any local tools to connect to any service in the cluster.
+
+2. Enter `telepresence list` and make sure the workload (deployment in this case) you want to replace is listed. For example:
+
+   ```console
+   $ telepresence list
+   ...
+   deployment example-app: ready to engage (traffic-agent not yet installed)
+   ...
+   ```
+
+3. Get the name of the container you want to replace (output truncated for brevity)
+   ```console
+   $ kubectl describe deploy example-app
+   Name: example-app
+   Namespace: default
+   CreationTimestamp: Tue, 14 Jan 2025 03:49:29 +0100
+   Labels: app=example-app
+   Annotations: deployment.kubernetes.io/revision: 1
+   Selector: app=example-app
+   Replicas: 1 desired | 1 updated | 1 total | 0 available | 1 unavailable
+   StrategyType: RollingUpdate
+   MinReadySeconds: 0
+   RollingUpdateStrategy: 25% max unavailable, 25% max surge
+   Pod Template:
+   Labels: app=example-app
+   Containers:
+   echo-server:
+   Image: ghcr.io/telepresenceio/echo-server
+   Port: 8080/TCP
+   ```
+
+4. Replace the container. Please note that the `--container echo-server` flag here is optional. It's only needed when the workload has more than one container:
+   ```console
+   $ telepresence replace example-app --container echo-server --env-file /tmp/example-app.env --mount /tmp/example-app-mounts
+   Using Deployment example-app
+   Container name : echo-server
+   State : ACTIVE
+   Workload kind : Deployment
+   Port forwards : 10.1.4.106 -> 127.0.0.1
+   8080 -> 8080 TCP
+   Volume Mount Point: /tmp/example-app-mounts
+   ```
+   Your workstation is now ready. 
You can run the application using the environment in the `/tmp/example-app.env` file and the
+   mounts under `/tmp/example-app-mounts`. The application can listen to `localhost:8080` to receive traffic intended for the
+   replaced container. On the cluster side of things, a Traffic Agent container has replaced the `echo-server`.
+
+   Telepresence assumes that you want all declared container ports to be mapped to their corresponding port on `localhost`. You
+   can change this with the `--port` flag. For example, `--port 1080:8080` will map the replaced container's port number `8080`
+   to `localhost:1080`. The `--port` can also be used when the container is known to listen to ports that are not declared in
+   the manifest.
+
+5. Query the cluster in which you replaced your application and verify that your local instance is being invoked. All the traffic previously routed to your Kubernetes Service is now routed to your local environment.
+
+You can now:
+- Make changes on the fly and see them reflected when interacting with your Kubernetes environment.
+- Query services only exposed in your cluster's network.
+- Set breakpoints in your IDE to investigate bugs.
+
+6. You end the replace operation with the command `telepresence leave example-app --container echo-server`
+
+## Ingest your Container
+
+In some situations, you want to work and debug the code locally, and you want it to be able to access other services in the cluster,
+but you don't wish to interfere with the targeted workload. This is where the `telepresence ingest` command comes into play. Just
+like the `replace` command, it will make the environment and mounted volumes of the targeted container available locally, but it will
+not replace the container nor will it intercept any of its traffic.
+
+This example assumes that you have the `example-app` deployment.
+
+1. 
Connect and start an ingest from `example-app`:
+   ```console
+   $ telepresence connect
+   Launching Telepresence User Daemon
+   Launching Telepresence Root Daemon
+   Connected to context xxx, namespace default (https://)
+   $ telepresence ingest example-app --container echo-server --env-file /tmp/example-app.env --mount /tmp/example-app-mounts
+   Using Deployment example-app
+   Container name : echo-server
+   Workload kind : Deployment
+   Volume Mount Point: /tmp/example-app-mounts
+   ```
+
+2. Start your local application using the environment variables retrieved and the volumes that were mounted in the previous step.
+
+You can now:
+- Code and debug your local app while it interacts with other services in your cluster.
+- Query services only exposed in your cluster's network.
+- Set breakpoints in your IDE to investigate bugs.
+
+## Intercept your application
+
+You can use the `telepresence intercept` command when you want to intercept the traffic for a specific service and route that
+traffic to your workstation. The `intercept` is less intrusive than the `replace`, because it allows the original receiver of the
+intercepted traffic to continue to run and deal with tasks that aren't directly related to that traffic.
+
+1. Connect to your cluster with `telepresence connect`.
+
+2. Intercept all traffic going to the application's http port in your cluster and redirect to port 8080 on your workstation.
+   ```console
+   $ telepresence intercept example-app --port 8080:http --env-file ~/example-app-intercept.env --mount /tmp/example-app-mounts
+   Using Deployment example-app
+   intercepted
+   Intercept name: example-app
+   State : ACTIVE
+   Workload kind : Deployment
+   Destination : 127.0.0.1:8080
+   Intercepting : all TCP connections
+   ```
+
+   * For `--port`: specify the port the local instance of your application is running on, and optionally the remote port
+     that you want to intercept. 
Telepresence will select the remote port automatically when there's only one service + port available to access the workload. You must specify the port to intercept when the workload exposes multiple + ports. You can do this by specifying the port you want to intercept after a colon in the `--port` argument (like in + the example), and/or by specifying the service you want to intercept using the `--service` flag. + + * For `--env-file`: specify a file path for Telepresence to write the environment variables that are set for the targeted + container. + +3. Start your local application using the environment variables retrieved and the volumes that were mounted in the previous step. + +You can now: +- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. +- Query services only exposed in your cluster's network. +- Set breakpoints in your IDE to investigate bugs. + +### Running everything using Docker + +This approach eliminates the need for root access and confines the Telepresence network interface and remote mounts +to a container. Additionally, it allows for precise replication of the target container's volume mounts, using identical +mount points. However, this method will require docker to get cluster connectivity, and the containerized environment can +present challenges in terms of toolchain integration, debugging, and the overall development workflow. + +1. Connect to your cluster with `telepresence connect --docker`. This starts the Telepresence daemon in a docker + container and ensures that this container has access to the cluster network. +2. Use `telepresence curl` to access the Kubernetes API server from a container. + A 401 or 403 response code is expected and indicates that the service could be reached. 
The `telepresence curl` command
+   used will execute a standard `curl` command from a container that shares the network created by the `connect` call:
+
+   ```console
+   $ telepresence curl -ik https://kubernetes.default
+   HTTP/1.1 401 Unauthorized
+   Cache-Control: no-cache, private
+   Content-Type: application/json
+   ...
+   ```
+
+   You now have access to your remote Kubernetes API server as if you were on the same network.
+
+3. Enter `telepresence list` and make sure the workload you want to engage is listed. For example:
+
+   ```console
+   $ telepresence list
+   ...
+   deployment example-app: ready to engage (traffic-agent not yet installed)
+   ...
+   ```
+
+4. Use `replace`, `ingest`, or `intercept` to engage the container in combination with the `--docker-run` flag.
+   Example using `telepresence replace`
+
+   ```console
+   $ telepresence replace example-app --container echo-server --docker-run -- <image>
+   Using Deployment example-app
+   intercepted
+   Intercept name: example-app
+   State : ACTIVE
+   Workload kind : Deployment
+   Destination : 127.0.0.1:8080
+   Intercepting : all TCP connections
+
+   ```
+
+You can now:
+- Make changes on the fly and see them reflected when interacting with your Kubernetes environment; although
+  depending on how your local container is configured, this might require that it is rebuilt.
+- Query services only exposed in your cluster's network using `telepresence curl`.
+- Set breakpoints in a _Remote Debug_ configuration in your IDE to investigate bugs.
diff --git a/versioned_docs/version-2.22/howtos/large-clusters.md b/versioned_docs/version-2.22/howtos/large-clusters.md
new file mode 100644
index 00000000..954dcde5
--- /dev/null
+++ b/versioned_docs/version-2.22/howtos/large-clusters.md
@@ -0,0 +1,50 @@
+---
+title: Work with large clusters
+description: Use Telepresence to intercept services in clusters with a large number of namespaces and workloads. 
+hide_table_of_contents: true
+---
+# Working with large clusters
+
+## Large number of namespaces
+
+### The problem
+When telepresence connects to a cluster, it will configure the local DNS server so that each namespace in the cluster can be used as a top-level domain (TLD). E.g. if the cluster contains the namespace "example", then a curl for the name "my_service.example" will be directed to the Telepresence DNS server, because it has announced that it wants to resolve the "example" domain.
+
+Telepresence tries to be conservative about which namespaces it will create TLDs for, and first checks if the namespace is accessible by the user. This check can be time-consuming in a cluster with a large number of namespaces, because each check will typically take up to a second to complete, which means that for a cluster with 120 namespaces, this check can take two minutes. That's a long time to wait when doing `telepresence connect`.
+
+### How to solve it
+
+#### Limiting at connect
+
+The `telepresence connect` command will accept the flag `--mapped-namespaces <comma-separated list of namespaces>`, which will limit the names that Telepresence creates TLDs for in the DNS resolver. This may drastically decrease the time it takes to connect, and also improve the DNS resolver's performance.
+
+#### Limiting the traffic-manager
+
+It is possible to limit the namespaces that the traffic-manager will care about when it is installed or upgraded by passing the Helm chart value `namespaces` or `namespaceSelector`. This will tell the manager to only manage those namespaces with respect to connects and engagements. A namespace-limited manager creates an implicit `mapped-namespaces` set for all clients that connect to it.
+
+## Large number of pods
+
+### The problem
+
+A cluster with a large number of pods can be problematic in situations where the traffic-manager is unable to use its default behavior of retrieving the pod-subnets from the cluster nodes. 
The manager will then use a fallback method, which is to retrieve the IP of all pods and then use those IPs to calculate the pod-subnets. This, in turn, might cause a very large number of requests to the Kubernetes API server.
+
+### The solution
+
+If it is RBAC permission limitations that prevent the traffic-manager from reading the `podCIDR` from the nodes, then adding the necessary permissions might help. But in many cases, the nodes will not have a `podCIDR` defined. The fallback for such cases is to specify the `podCIDRs` manually (and thus prevent the scan + calculation) using the Helm chart values:
+
+```yaml
+podCIDRStrategy: environment
+podCIDRs:
+  - <pod CIDR>
+...
+```
+
+## Traffic Manager Namespaces
+
+Depending on use-case, it's sometimes beneficial to have several Traffic Managers installed, each being responsible for
+a limited number of namespaces and prohibited from accessing other namespaces. A cluster can have any number of Traffic
+Managers, as long as each one manages its own unique set of namespaces.
+
+A client that connects to a Traffic Manager will automatically be limited to its managed namespaces.
+
+See [Installing a namespace-scoped traffic-manager](../install/manager.md#limiting-the-namespace-scope) for details. 
diff --git a/versioned_docs/version-2.22/images/TP_Architecture.svg b/versioned_docs/version-2.22/images/TP_Architecture.svg new file mode 100644 index 00000000..a93bdd7e --- /dev/null +++ b/versioned_docs/version-2.22/images/TP_Architecture.svg @@ -0,0 +1,900 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-2.22/images/bugfix.png b/versioned_docs/version-2.22/images/bugfix.png new file mode 100644 index 00000000..7c03920b Binary files /dev/null and b/versioned_docs/version-2.22/images/bugfix.png differ diff --git a/versioned_docs/version-2.22/images/change.png b/versioned_docs/version-2.22/images/change.png new file mode 100644 index 00000000..d271fc51 Binary files /dev/null and b/versioned_docs/version-2.22/images/change.png differ diff --git a/versioned_docs/version-2.22/images/container-inner-dev-loop.png b/versioned_docs/version-2.22/images/container-inner-dev-loop.png new file mode 100644 index 00000000..06586cd6 Binary files /dev/null and b/versioned_docs/version-2.22/images/container-inner-dev-loop.png differ diff --git a/versioned_docs/version-2.22/images/docker-header-containers.png b/versioned_docs/version-2.22/images/docker-header-containers.png new file mode 100644 index 00000000..06f422a9 Binary files /dev/null and b/versioned_docs/version-2.22/images/docker-header-containers.png differ diff --git a/versioned_docs/version-2.22/images/feature.png b/versioned_docs/version-2.22/images/feature.png new file mode 100644 index 00000000..525aa1f9 Binary files /dev/null and b/versioned_docs/version-2.22/images/feature.png differ diff --git a/versioned_docs/version-2.22/images/logo.png b/versioned_docs/version-2.22/images/logo.png new file mode 100644 index 00000000..701f63ba Binary files /dev/null and b/versioned_docs/version-2.22/images/logo.png differ diff --git a/versioned_docs/version-2.22/images/secondary-container-intercept.png b/versioned_docs/version-2.22/images/secondary-container-intercept.png new file mode 100644 index 00000000..e19a3059 Binary files /dev/null and b/versioned_docs/version-2.22/images/secondary-container-intercept.png differ diff --git a/versioned_docs/version-2.22/images/secondary-no-intercept.png 
b/versioned_docs/version-2.22/images/secondary-no-intercept.png new file mode 100644 index 00000000..9764dbe5 Binary files /dev/null and b/versioned_docs/version-2.22/images/secondary-no-intercept.png differ diff --git a/versioned_docs/version-2.22/images/secondary-normal-intercept.png b/versioned_docs/version-2.22/images/secondary-normal-intercept.png new file mode 100644 index 00000000..8cad6648 Binary files /dev/null and b/versioned_docs/version-2.22/images/secondary-normal-intercept.png differ diff --git a/versioned_docs/version-2.22/images/security.png b/versioned_docs/version-2.22/images/security.png new file mode 100644 index 00000000..37078d66 Binary files /dev/null and b/versioned_docs/version-2.22/images/security.png differ diff --git a/versioned_docs/version-2.22/images/split-tunnel.png b/versioned_docs/version-2.22/images/split-tunnel.png new file mode 100644 index 00000000..5bf30378 Binary files /dev/null and b/versioned_docs/version-2.22/images/split-tunnel.png differ diff --git a/versioned_docs/version-2.22/images/tracing.png b/versioned_docs/version-2.22/images/tracing.png new file mode 100644 index 00000000..c374807e Binary files /dev/null and b/versioned_docs/version-2.22/images/tracing.png differ diff --git a/versioned_docs/version-2.22/images/trad-inner-dev-loop.png b/versioned_docs/version-2.22/images/trad-inner-dev-loop.png new file mode 100644 index 00000000..618b674f Binary files /dev/null and b/versioned_docs/version-2.22/images/trad-inner-dev-loop.png differ diff --git a/versioned_docs/version-2.22/images/tunnelblick.png b/versioned_docs/version-2.22/images/tunnelblick.png new file mode 100644 index 00000000..8944d445 Binary files /dev/null and b/versioned_docs/version-2.22/images/tunnelblick.png differ diff --git a/versioned_docs/version-2.22/images/vpn-dns.png b/versioned_docs/version-2.22/images/vpn-dns.png new file mode 100644 index 00000000..eed535c4 Binary files /dev/null and b/versioned_docs/version-2.22/images/vpn-dns.png differ 
diff --git a/versioned_docs/version-2.22/images/vpn-k8s-config.jpg b/versioned_docs/version-2.22/images/vpn-k8s-config.jpg new file mode 100644 index 00000000..66116e41 Binary files /dev/null and b/versioned_docs/version-2.22/images/vpn-k8s-config.jpg differ diff --git a/versioned_docs/version-2.22/images/vpn-routing.jpg b/versioned_docs/version-2.22/images/vpn-routing.jpg new file mode 100644 index 00000000..18410dd4 Binary files /dev/null and b/versioned_docs/version-2.22/images/vpn-routing.jpg differ diff --git a/versioned_docs/version-2.22/images/vpn-vnat.jpg b/versioned_docs/version-2.22/images/vpn-vnat.jpg new file mode 100644 index 00000000..9c7d0551 Binary files /dev/null and b/versioned_docs/version-2.22/images/vpn-vnat.jpg differ diff --git a/versioned_docs/version-2.22/images/vpn-with-tele.jpg b/versioned_docs/version-2.22/images/vpn-with-tele.jpg new file mode 100644 index 00000000..843b253e Binary files /dev/null and b/versioned_docs/version-2.22/images/vpn-with-tele.jpg differ diff --git a/versioned_docs/version-2.22/install/client.md b/versioned_docs/version-2.22/install/client.md new file mode 100644 index 00000000..67f894ac --- /dev/null +++ b/versioned_docs/version-2.22/install/client.md @@ -0,0 +1,166 @@ +--- +title: Install client +hide_table_of_contents: true +--- + + +import Platform from '@site/src/components/Platform'; + +# Client Installation + +Install the Telepresence client on your workstation by running the commands below for your OS. + + + + + +## Install with brew: +```shell +brew install telepresenceio/telepresence/telepresence-oss +``` + +## OR download the binary for your platform + +### AMD (Intel) Macs + +```shell +# 1. Download the binary. +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-darwin-amd64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + +### ARM (Apple Silicon) Macs + +```shell +# 1. 
Ensure that no old binary exists. This is very important because Silicon macs track the executable's signature +# and just updating it in place will not work. +sudo rm -f /usr/local/bin/telepresence + +# 2. Download the binary. +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-darwin-arm64 -o /usr/local/bin/telepresence + +# 3. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +```shell +# 1. Download the latest binary (~95 MB): +# AMD +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-linux-amd64 -o /usr/local/bin/telepresence + +# ARM +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-linux-arm64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +We've developed a Powershell script to simplify the process of installing telepresence. Here are the commands you can execute: + +### Windows AMD64 + +```powershell +# To install Telepresence, run the following commands +# from PowerShell as Administrator. + +# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~60 MB): +$ProgressPreference = 'SilentlyContinue' +Invoke-WebRequest https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-windows-amd64.zip -OutFile telepresence.zip + +# 2. Unzip the telepresence.zip file to the desired directory, then remove the zip file: +Expand-Archive -Path telepresence.zip -DestinationPath telepresenceInstaller/telepresence +Remove-Item 'telepresence.zip' +cd telepresenceInstaller/telepresence + +# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to +# C:\telepresence by default, but you can specify a custom path by passing in -Path C:\my\custom\path +powershell.exe -ExecutionPolicy bypass -c " . 
'.\install-telepresence.ps1';" + +# 4. Remove the unzipped directory: +cd ../.. +Remove-Item telepresenceInstaller -Recurse -Confirm:$false -Force + +# 5. Telepresence is now installed and you can use telepresence commands in PowerShell. +``` + +### Windows ARM64 + +```powershell +# To install Telepresence, run the following commands +# from PowerShell as Administrator. + +# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~60 MB): +$ProgressPreference = 'SilentlyContinue' +Invoke-WebRequest https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-windows-arm64.zip -OutFile telepresence.zip + +# 2. Unzip the telepresence.zip file to the desired directory, then remove the zip file: +Expand-Archive -Path telepresence.zip -DestinationPath telepresenceInstaller/telepresence +Remove-Item 'telepresence.zip' +cd telepresenceInstaller/telepresence + +# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to +# C:\telepresence by default, but you can specify a custom path by passing in -Path C:\my\custom\path +powershell.exe -ExecutionPolicy bypass -c " . '.\install-telepresence.ps1';" + +# 4. Remove the unzipped directory: +cd ../.. +Remove-Item telepresenceInstaller -Recurse -Confirm:$false -Force + +# 5. Telepresence is now installed and you can use telepresence commands in PowerShell. +``` + + + + +> [!TIP] +> What's Next? +> Follow one of our [quick start guides](../quick-start.md) to start using Telepresence, either with our sample app or in your own environment. + +## Installing older versions of Telepresence + +Use these URLs to download an older version for your OS (including older nightly builds), replacing `x.y.z` with the versions you want. 
+ + + + +```shell +# AMD (Intel) Macs +https://github.com/telepresenceio/telepresence/releases/download/vX.Y.Z/telepresence-darwin-amd64 + +# ARM (Apple Silicon) Macs +https://github.com/telepresenceio/telepresence/releases/download/vX.Y.Z/telepresence-darwin-arm64 +``` + + + + +``` +# AMD +https://github.com/telepresenceio/telepresence/releases/download/vX.Y.Z/telepresence-linux-amd64 + +# ARM +https://github.com/telepresenceio/telepresence/releases/download/vX.Y.Z/telepresence-linux-arm64 +``` + + + + +``` +# Windows AMD64 +https://github.com/telepresenceio/telepresence/releases/download/vX.Y.Z/telepresence-windows-amd64.zip + +# Windows ARM64 +https://github.com/telepresenceio/telepresence/releases/download/vX.Y.Z/telepresence-windows-arm64.zip +``` + + + + diff --git a/versioned_docs/version-2.22/install/cloud.md b/versioned_docs/version-2.22/install/cloud.md new file mode 100644 index 00000000..0379bd39 --- /dev/null +++ b/versioned_docs/version-2.22/install/cloud.md @@ -0,0 +1,60 @@ +--- +title: Cloud Provider Prerequisites +hide_table_of_contents: true +--- + +# Provider Prerequisites for Traffic Manager + +## GKE + +### Firewall Rules for private clusters + +A GKE cluster with private networking will come preconfigured with firewall rules that prevent the Traffic Manager's +webhook injector from being invoked by the Kubernetes API server. +For Telepresence to work in such a cluster, you'll need to [add a firewall rule](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) allowing the Kubernetes masters to access TCP port `8443` in your pods. 
+For example, for a cluster named `tele-webhook-gke` in region `us-central1-c`: + +```bash +$ gcloud container clusters describe tele-webhook-gke --region us-central1-c | grep masterIpv4CidrBlock + masterIpv4CidrBlock: 172.16.0.0/28 # Take note of the IP range, 172.16.0.0/28 + +$ gcloud compute firewall-rules list \ + --filter 'name~^gke-tele-webhook-gke' \ + --format 'table( + name, + network, + direction, + sourceRanges.list():label=SRC_RANGES, + allowed[].map().firewall_rule().list():label=ALLOW, + targetTags.list():label=TARGET_TAGS + )' + +NAME NETWORK DIRECTION SRC_RANGES ALLOW TARGET_TAGS +gke-tele-webhook-gke-33fa1791-all tele-webhook-net INGRESS 10.40.0.0/14 esp,ah,sctp,tcp,udp,icmp gke-tele-webhook-gke-33fa1791-node +gke-tele-webhook-gke-33fa1791-master tele-webhook-net INGRESS 172.16.0.0/28 tcp:10250,tcp:443 gke-tele-webhook-gke-33fa1791-node +gke-tele-webhook-gke-33fa1791-vms tele-webhook-net INGRESS 10.128.0.0/9 icmp,tcp:1-65535,udp:1-65535 gke-tele-webhook-gke-33fa1791-node +# Take note of the TARGET_TAGS value, gke-tele-webhook-gke-33fa1791-node + +$ gcloud compute firewall-rules create gke-tele-webhook-gke-webhook \ + --action ALLOW \ + --direction INGRESS \ + --source-ranges 172.16.0.0/28 \ + --rules tcp:8443 \ + --target-tags gke-tele-webhook-gke-33fa1791-node --network tele-webhook-net +Creating firewall...⠹Created [https://www.googleapis.com/compute/v1/projects/datawire-dev/global/firewalls/gke-tele-webhook-gke-webhook]. +Creating firewall...done. +NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED +gke-tele-webhook-gke-webhook tele-webhook-net INGRESS 1000 tcp:8443 False +``` + +### GKE Authentication Plugin + +Starting with Kubernetes version 1.26, GKE will require the use of the [gke-gcloud-auth-plugin](https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke). +You will need to install this plugin to use Telepresence with Docker while using GKE. 
+ +## EKS + +### EKS Authentication Plugin + +If you are using AWS CLI version earlier than `1.16.156` you will need to install [aws-iam-authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html). +You will need to install this plugin to use Telepresence with Docker while using EKS. \ No newline at end of file diff --git a/versioned_docs/version-2.22/install/manager.md b/versioned_docs/version-2.22/install/manager.md new file mode 100644 index 00000000..ad84ea8c --- /dev/null +++ b/versioned_docs/version-2.22/install/manager.md @@ -0,0 +1,236 @@ +--- +title: Install Traffic Manager +hide_table_of_contents: true +--- + +# Install/Uninstall the Traffic Manager + +Telepresence uses a traffic manager to send/receive cloud traffic to the user. Telepresence uses [Helm](https://helm.sh) under the +hood to install the traffic manager in your cluster. The `telepresence` binary embeds both `helm` and a helm-chart for a +traffic-manager that is of the same version as the binary. + +The Telepresence Helm chart documentation is published at [ArtifactHUB](https://artifacthub.io/packages/helm/telepresence-oss/telepresence-oss). + +You can also use `helm` command directly, see [Install With Helm](#install-with-helm) for more details. + +## Prerequisites + +Before you begin, you need to have [Telepresence installed](../install/client.md). + +If you are not the administrator of your cluster, you will need [administrative RBAC permissions](../reference/rbac.md#administrating-telepresence) to install and use Telepresence in your cluster. + +In addition, you may need certain prerequisites depending on your cloud provider and platform. +See the [cloud provider installation notes](../install/cloud.md) for more. + +## Install the Traffic Manager + +The telepresence cli can install the traffic manager for you. The basic install will install the same version as the client used. + +1. 
Install the Telepresence Traffic Manager with the following command: + + ```shell + telepresence helm install + ``` + +### Customizing the Traffic Manager. + +For details on what the Helm chart installs and what can be configured, see the Helm chart [configuration on artifacthub](https://artifacthub.io/packages/helm/datawire/telepresence). + +1. Create a values.yaml file with your config values. + +2. Run the `install` command with the `--values` flag set to the path to your values file: + + ```shell + telepresence helm install --values values.yaml + ``` + alternatively, provide values using the `--set` flag: + ```shell + telepresence helm install --set logLevel=debug + ``` + +### Install into custom namespace + +The Helm chart supports being installed into any namespace, not necessarily `ambassador`. Simply pass a different `namespace` argument to +`telepresence helm install`. For example, if you wanted to deploy the traffic manager to the `staging` namespace: + +```shell +telepresence helm install traffic-manager --namespace staging datawire/telepresence +``` + +> [!NOTE] +> If you have several traffic-managers installed, or if users don't have permissions to list +> namespaces, they will need to either use a `--manager-namespace ` flag when connecting or +> configure their config.yml or kubeconfig to find the desired installation of the Traffic Manager + +As kubeconfig extension: +```yaml +apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1 + extensions: + - name: telepresence.io + extension: + cluster: + defaultManagerNamespace: staging + name: example-cluster +``` + +or in the `config.yml`: + +```yaml +cluster: + defaultManagerNamespace: staging +``` + +See [the kubeconfig documentation](../reference/config.md#manager) for more information. + +## Upgrading/Downgrading the Traffic Manager. + +1. Download the cli of the version of Telepresence you wish to use. + +2. Run the `upgrade` command. 
Optionally with `--values` and/or `--set` flags + + ```shell + telepresence helm upgrade + ``` + You can also use the `--reuse-values` or `--reset-values` flags to specify if previously installed values should be reused or reset. + + +## Uninstall + +The telepresence cli can uninstall the traffic manager for you using the `telepresence helm uninstall` command. + +1. Uninstall the Telepresence Traffic Manager and all the agents installed by it using the following command: + + ```shell + telepresence helm uninstall + ``` + +## Limiting the Namespace Scope + +You might not want the Traffic Manager to have permissions across the entire Kubernetes cluster, or you might want to be able to install multiple traffic managers per cluster (for example, to separate them by environment). +In these cases, the traffic manager supports being installed with a namespace scope, allowing cluster administrators to limit the reach of a traffic manager's permissions. + +For example, suppose you want a Traffic Manager that only works on namespaces `dev` and `staging`. +To do this, create a `values.yaml` like the following: + +```yaml +namespaces: + - dev + - staging +``` + +This can then be installed via: + +```shell +telepresence helm install --namespace staging -f ./values.yaml +``` + +### Namespace collision detection + +The Telepresence Helm chart incorporates a mechanism to prevent conflicts between Traffic Managers operating within +different namespaces. This is achieved by: +1. Determining the Traffic Manager's set of namespaces by applying its namespace selector to all of the cluster's namespaces. +2. Verifying that there is no overlap between the sets of namespaces for any pair of Traffic Managers. 
+ +So, for example, suppose you install one Traffic Manager to manage namespaces `dev` and `staging`, as: + +```bash +telepresence helm install --namespace dev --set 'namespaces={dev,staging}' +``` + +You might then attempt to install another Traffic Manager to manage namespaces `staging` and `prod`: + +```bash +telepresence helm install --namespace prod --set 'namespaces={staging,prod}' +``` + +This would fail with an error: + +``` +telepresence helm install: error: execution error at (telepresence-oss/templates/agentInjectorWebhook.yaml:61:14): traffic-manager in namespace dev already manages namespace staging +``` + +To fix this error, resolve the overlap by removing `staging` from either the first install or the second. + +### Static versus Dynamic Namespace Selection + +A namespace selector can be dynamic or static. This in turn controls whether telepresence needs "cluster-wide" or +"namespaced" role/rolebinding pairs. A Traffic Manager configured with a dynamic selector requires cluster-wide +namespace access and `ClusterRole`/`ClusterRoleBinding` pairs. A Traffic Manager configured with a static selector needs +a `Role`/`RoleBinding` pair in each of the selected namespaces. + +A selector is considered _static_ if it meets the following conditions: +- The selector must have exactly one element in either the `matchLabels` or the `matchExpressions` list (a `key=value` + element in the `matchLabels` list is normalized into a `key in [value]` expression element). +- The element must meet the following criteria: + - The `key` of the match expression must be "kubernetes.io/metadata.name". + - The `operator` of the match expression must be "In" (case sensitive). + - The `values` list of the match expression must contain at least one value. + +## Static Namespace Selection RBAC + +Optionally, you can also configure user RBAC to be scoped to the same namespaces as the manager itself. 
+You might want to do this if you don't give your users permissions throughout the cluster, and want to make sure they +only have the minimum set required to perform telepresence commands on certain namespaces. + +Continuing with the `dev` and `staging` example from the previous section, simply add the following to `values.yaml` +(make sure you set the `subjects`!): + +```yaml +clientRbac: + create: true + + # These are the users or groups to which the user rbac will be bound. + # This MUST be set. + subjects: {} + # - kind: User + # name: jane + # apiGroup: rbac.authorization.k8s.io + + # The namespaces can be explicitly specified here, but can be omitted unless the + # Traffic Manager's namespaceSelector is dynamic. + namespaces: + - dev + - staging +``` + +### Installing RBAC only + +Telepresence Traffic Manager does require some [RBAC](../reference/rbac.md) for the traffic-manager deployment itself, as well as for users. +To make it easier for operators to introspect / manage RBAC separately, you can use `rbac.only=true` to +only create the rbac-related objects. +Additionally, you can use `clientRbac.create=true` and `managerRbac.create=true` to toggle which subset(s) of RBAC objects you wish to create. + +## Install with Helm + +Before you begin, you must ensure that the [helm command](https://helm.sh/docs/intro/install/) is installed. + +The Telepresence Helm chart is published at GitHub in the ghcr.io repository. 
+ +### Installing + +Install the latest stable version of the traffic-manager into the default "ambassador" namespace with the following command: + +```bash +helm install --create-namespace --namespace ambassador traffic-manager oci://ghcr.io/telepresenceio/telepresence-oss +``` + +### Upgrading/Downgrading + +Use this command if you installed the Traffic Manager into the "ambassador" namespace, and you just wish to upgrade it +to the latest version without changing any configuration values: + +```bash +helm upgrade --namespace ambassador --reuse-values traffic-manager oci://ghcr.io/telepresenceio/telepresence-oss +``` + +If you want to upgrade (or downgrade) the Traffic Manager to a specific version, add a `--version` flag with the version +number to the upgrade command, e.g.: `--version v2.20.3`. + +### Uninstalling + +Use the following command to uninstall the Traffic Manager: +```bash +helm uninstall --namespace ambassador traffic-manager +``` diff --git a/versioned_docs/version-2.22/install/upgrade.md b/versioned_docs/version-2.22/install/upgrade.md new file mode 100644 index 00000000..bdff3219 --- /dev/null +++ b/versioned_docs/version-2.22/install/upgrade.md @@ -0,0 +1,81 @@ +--- +title: Upgrade client +description: "How to upgrade your installation of Telepresence and install previous versions." +hide_table_of_contents: true +--- + +import Platform from '@site/src/components/Platform'; + +# Upgrade Process +The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. + +Before upgrading your CLI, you must stop any live Telepresence processes by issuing `telepresence quit -s` (or `telepresence quit -ur` +if your current version is less than 2.8.0). 
+ + + + + +## Upgrade with brew: +```shell +brew upgrade telepresenceio/telepresence/telepresence-oss +``` + +## OR upgrade by downloading the binary for your platform + +### Intel Macs + +```shell +# 1. Download the binary. +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-darwin-amd64 + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + +### ARM (Apple Silicon) Macs + +```shell +# 1. Ensure that no old binary exists. This is very important because Silicon macs track the executable's signature +# and just updating it in place will not work. + +# 2. Download the binary. +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-darwin-arm64 -o /usr/local/bin/telepresence + +# 3. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + +```shell + +# 1. Download the latest binary (~95 MB): +### Intel +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-linux-amd64 -o /usr/local/bin/telepresence + +### ARM +sudo curl -fL https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-linux-arm64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +To upgrade Telepresence,Click [here](https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-windows-amd64.zip) +to download the Intel Telepresence binary or [here](https://github.com/telepresenceio/telepresence/releases/latest/download/telepresence-windows-arm64.zip) +to download the ARM Telepresence binary. + +Once you have the binary downloaded and unzipped you will need to do a few things: + +1. Rename the binary from `telepresence-windows-[amd64|arm64].exe` to `telepresence.exe` +2. 
Move the binary to `C:\Program Files (x86)\$USER\Telepresence\` + + + + + +The Telepresence CLI contains an embedded Helm chart. See [Install/Uninstall the Traffic Manager](manager.md) if you want to also upgrade +the Traffic Manager in your cluster. diff --git a/versioned_docs/version-2.22/licenses.md b/versioned_docs/version-2.22/licenses.md new file mode 100644 index 00000000..47737aa8 --- /dev/null +++ b/versioned_docs/version-2.22/licenses.md @@ -0,0 +1,8 @@ +Telepresence CLI incorporates Free and Open Source software under the following licenses: + +* [2-clause BSD license](https://opensource.org/licenses/BSD-2-Clause) +* [3-clause BSD license](https://opensource.org/licenses/BSD-3-Clause) +* [Apache License 2.0](https://opensource.org/licenses/Apache-2.0) +* [ISC license](https://opensource.org/licenses/ISC) +* [MIT license](https://opensource.org/licenses/MIT) +* [Mozilla Public License 2.0](https://opensource.org/licenses/MPL-2.0) diff --git a/versioned_docs/version-2.22/quick-start.md b/versioned_docs/version-2.22/quick-start.md new file mode 100644 index 00000000..8bb4a703 --- /dev/null +++ b/versioned_docs/version-2.22/quick-start.md @@ -0,0 +1,25 @@ +--- +title: Quick start +description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." +hide_table_of_contents: true +--- + +# Telepresence Quickstart + +Telepresence is an open source tool that enables you to set up remote development environments for Kubernetes where you can still use all of your favorite local tools like IDEs, debuggers, and profilers. + +## Prerequisites + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), the Kubernetes command-line tool, or the OpenShift Container Platform command-line interface, [oc](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). +- A Kubernetes Deployment and Service. 
+ +## Install Telepresence + +Follow [Install Client](install/client.md) and [Install Traffic Manager](install/manager.md) instructions to install the +telepresence client on your workstation, and the traffic manager in your cluster. + +Checkout the [Howto](howtos/engage.md) to learn how Telepresence can engage with resources in your remote cluster, +enabling you to run the code on your local workstation. + +## What’s Next? +- [Learn about the Telepresence architecture.](reference/architecture) diff --git a/versioned_docs/version-2.22/redirects.yml b/versioned_docs/version-2.22/redirects.yml new file mode 100644 index 00000000..5961b347 --- /dev/null +++ b/versioned_docs/version-2.22/redirects.yml @@ -0,0 +1 @@ +- {from: "", to: "quick-start"} diff --git a/versioned_docs/version-2.22/reference/architecture.md b/versioned_docs/version-2.22/reference/architecture.md new file mode 100644 index 00000000..79c8d47f --- /dev/null +++ b/versioned_docs/version-2.22/reference/architecture.md @@ -0,0 +1,46 @@ +--- +title: Architecture +description: How Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop. +hide_table_of_contents: true +--- + +# Telepresence Architecture + +![Architecture](../images/TP_Architecture.svg) + +## Telepresence CLI + +The Telepresence CLI orchestrates the moving parts on the workstation: it starts the Telepresence Daemons and then acts +as a user-friendly interface to the Telepresence User Daemon. + +## Telepresence Daemons +Telepresence has Daemons that run on a developer's workstation and act as the main point of communication to the cluster's +network in order to communicate with the cluster and handle intercepted traffic. + +### User-Daemon +The User-Daemon coordinates the creation and deletion of replacements, ingests and intercepts by communicating with the [Traffic Manager](#traffic-manager). +All requests from and to the cluster go through this Daemon. 
+ +### Root-Daemon +The Root-Daemon manages the networking necessary to handle traffic between the local workstation and the cluster by setting up a +[Virtual Network Device](tun-device.md) (VIF). + +## Traffic Manager + +The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons +on developer workstations. It is responsible for injecting the Traffic Agent sidecar into engaged pods, +proxying all relevant inbound and outbound traffic, and tracking active engagements. + +The Traffic-Manager is installed by a cluster administrator. It can either be installed using the Helm chart embedded +in the telepresence client binary (`telepresence helm install`) or by using a Helm Chart directly. + +## Traffic Agent + +The Traffic Agent is a sidecar container that facilitates engagements. When a `replace`, `ingest` or `intercept` is first +started, the Traffic Agent container is injected into the workload's pod(s). You can see the Traffic Agent's status by +running `telepresence list` or `kubectl describe pod `. + +Depending on if an `replace` or `intercept` is active or not, the Traffic Agent will either route the incoming request +to your workstation, or it will pass it along to the container in the pod usually handling requests. + +Please see [Traffic Agent Sidecar](engagements/sidecar.md) for details. \ No newline at end of file diff --git a/versioned_docs/version-2.22/reference/client.md b/versioned_docs/version-2.22/reference/client.md new file mode 100644 index 00000000..b6ff818c --- /dev/null +++ b/versioned_docs/version-2.22/reference/client.md @@ -0,0 +1,38 @@ +--- +title: Client reference +description: CLI options for Telepresence to engage with resources in your Kubernetes cluster with code running on your laptop. 
+hide_table_of_contents: true +--- + +# Client reference + +The [Telepresence CLI client](../quick-start.md) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. + +## Commands + +A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. +You can append `--help` to each command below to get even more information about its usage. + +| Command | Description | +|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `completion` | Generate a shell completion script for bash, zsh, fish, or powershell | +| `config view` | View current Telepresence configuration | +| `connect` | Starts the local daemon and connects Telepresence to a namespace in your cluster. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | +| `curl` | curl using a containerized executable that shares the network established by a connect. Especially useful when using `connect --docker`. | +| `docker-run` | run a docker image in a container that shares the network established by a connect. Especially useful when using `connect --docker`. | +| `gather-logs` | Gather logs from traffic-manager, traffic-agents, user, and root daemons, and export them into a zip file that can be shared with others or included with a github issue. Use `--get-pod-yaml` to include the yaml for the `traffic-manager` and `traffic-agent`s. 
Use `--anonymize` to replace the actual pod names + namespaces used for the `traffic-manager` and pods containing `traffic-agent`s in the logs. | +| `helm install` | Install the traffic-manager using the helm chart embedded in the telepresence executable. | +| `helm upgrade` | Upgrade the traffic-manager using the helm chart embedded in the telepresence executable. | +| `helm uninstall` | Uninstall the traffic-manager and all traffic-agents. | +| `ingest` | Ingest a container to get access to its mounted volumes and environment variables: `telepresence ingest --container --env-file ` When used with a `--` separator, this command can also start a process so you can run a local instance of the ingested container. | + | +| `intercept` | Intercepts a service to get its ingress traffic routed to the workstation and access to its mounted volumes and environment variables: `telepresence intercept --port ` (use `port/UDP` to force UDP). When used with a `--` separator, this command can also start a process so you can run a local instance of the service you are intercepting. | +| `leave` | Stops an active replace, ingest, or intercept: `telepresence leave hello`. | +| `list` | Lists all workloads that are eligible for replace, ingest, or intercept. | +| `loglevel` | Temporarily change the log-level. The default duration (30 minutes) can be altered using `-d `. The flags `--local-only` and `--remote-only` can be used to alter the scope of the change. | +| `quit` | Tell Telepresence daemons to quit. | +| `replace` | Replace a container to get access to its traffic, mounted volumes and environment variables: `telepresence replace --container --env-file ` When used with a `--` separator, this command can also start a process so you can run a local instance of the replaced container. | +| +| `status` | Shows the current connectivity status. | +| `uninstall` | Uninstalls a Traffic Agent for a specific workload. 
Use the `--all-agents` flag to remove all Traffic Agents from all workloads. | +| `version` | Show version of Telepresence CLI + Traffic-Manager (if connected) | \ No newline at end of file diff --git a/versioned_docs/version-2.22/reference/cluster-config.md b/versioned_docs/version-2.22/reference/cluster-config.md new file mode 100644 index 00000000..196fd065 --- /dev/null +++ b/versioned_docs/version-2.22/reference/cluster-config.md @@ -0,0 +1,222 @@ +--- +title: Cluster-side configuration +--- +# Cluster-side configuration + +For the most part, Telepresence doesn't require any special +configuration in the cluster and can be used right away in any +cluster (as long as the user has adequate [RBAC permissions](rbac.md). + +## Helm Chart configuration +Some cluster specific configuration can be provided when installing +or upgrading the Telepresence cluster installation using Helm. Once +installed, the Telepresence client will configure itself from values +that it receives when connecting to the Traffic manager. + +See the Helm chart [README](https://artifacthub.io/packages/helm/telepresence-oss/telepresence-oss/$version$) +for a full list of available configuration settings. + +### Values +To add configuration, create a yaml file with the configuration values and then use it executing `telepresence helm install [--upgrade] --values ` + +## Client Configuration + +It is possible for the Traffic Manager to automatically push config to all +connecting clients. To learn more about this, please see the [client config docs](config.md#global-configuration) + +## Traffic Manager Configuration + +The `trafficManager` structure of the Helm chart configures the behavior of the Telepresence traffic manager. + +## Agent Configuration + +The `agent` structure of the Helm chart configures the behavior of the Telepresence agents. 
+ +### Image Configuration + +The `agent.image` structure contains the following values: + +| Setting | Meaning | +|------------|-----------------------------------------------------------------------------| +| `registry` | Registry used when downloading the image. Defaults to "docker.io/datawire". | +| `name` | The name of the image. Defaults to "tel2" | +| `tag` | The tag of the image. Defaults to $version$ | + +### Log level + +The `agent.LogLevel` controls the log level of the traffic-agent. See [Log Levels](config.md#log-levels) for more info. + +### Resources + +The `agent.resources` and `agent.initResources` will be used as the `resources` element when injecting traffic-agents and init-containers. + +## Mutating Webhook + +Telepresence uses a Mutating Webhook to inject the [Traffic Agent](architecture.md#traffic-agent) sidecar container and update the +port definitions. This means that an engaged workload (Deployment, StatefulSet, ReplicaSet, ArgoRollout) will remain untouched +and in sync as far as GitOps workflows (such as ArgoCD) are concerned. + +The injection will happen on demand the first time an attempt is made to replace, ingest, or intercept the workload. + +If you want to prevent that the injection ever happens, simply add the `telepresence.io/inject-traffic-agent: disabled` +annotation to your workload template's annotations: + +```diff + spec: + template: + metadata: + labels: + service: your-service ++ annotations: ++ telepresence.io/inject-traffic-agent: disabled + spec: + containers: +``` + +### Service Name and Port Annotations + +Telepresence will automatically find all services and all ports that will connect to a workload and make them available +for an intercept, but you can explicitly define that only one service and/or port can be intercepted. 
+ +```diff + spec: + template: + metadata: + labels: + service: your-service + annotations: ++ telepresence.io/inject-service-name: my-service ++ telepresence.io/inject-service-ports: https + spec: + containers: +``` + +### Control Volume Sharing + +Telepresence enables control over what volumes that will be shared with connecting clients using mount policies. A +policy can be declared for a volume name, or for paths matching a path prefix, and can be added either as a Helm +chart value using `agents.mountPolicies` or using the workload annotation `telepresence.io/mount-policies`. + +Possible Mount Policies are: + +| Policy | Meaning | +|----------------|------------------------------------------------------------------------------------------------------| +| Ignore | Do not share this volume with engaging clients | +| Local | Do not share this volume with engaging clients, instead Mount it using the client's local filesystem | +| Remote | Share this volume, and give engaging clients read and write access to it | +| RemoteReadOnly | Like Remote, but with read-only access | + +Example Helm chart value: +```yaml +agents: + mountPolicies: + '/tmp': Local + certs: RemoteReadOnly + private: Ignore +``` + +Example using the `telepresence.io/mount-policies` annotation: +```yaml +spec: + template: + metadata: + annotations: + 'telepresence.io/mount-policies': '{"/tmp":"Local","certs":"RemoteReadOnly","private":"Ignore"}' +``` + +The annotation `telepresence.io/inject-ignore-volume-mounts` can be used if the objective is to just ignore +volume mounts, but it's recommended to always use the `telepresence.io/mount-policies` annotation. 
+
+Example using the `telepresence.io/inject-ignore-volume-mounts` annotation:
+
+```yaml
+ spec:
+   template:
+     metadata:
+       annotations:
+         telepresence.io/inject-ignore-volume-mounts: "private"
+```
+
+The example is equivalent to:
+```yaml
+ spec:
+   template:
+     metadata:
+       annotations:
+         telepresence.io/mount-policies: '{"private":"Ignore"}'
+```
+
+### Note on Numeric Ports
+
+If the `targetPort` of your intercepted service is pointing at a port number, in addition to
+injecting the Traffic Agent sidecar, Telepresence will also inject an `initContainer` that will
+reconfigure the pod's firewall rules to redirect traffic to the Traffic Agent.
+
+> [!IMPORTANT]
+> Note that this `initContainer` requires `NET_ADMIN` capabilities. If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector.
+
+For example, the following service is using a numeric port, so Telepresence would inject an initContainer into it:
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: your-service
+spec:
+  type: ClusterIP
+  selector:
+    service: your-service
+  ports:
+    - port: 80
+      targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: your-service
+  labels:
+    service: your-service
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      service: your-service
+  template:
+    metadata:
+      annotations:
+        telepresence.io/inject-traffic-agent: enabled
+      labels:
+        service: your-service
+    spec:
+      containers:
+        - name: your-container
+          image: jmalloc/echo-server
+          ports:
+            - containerPort: 8080
+```
+
+## Excluding Environment Variables
+
+If your pod contains sensitive variables like a database password, or third party API Key, you may want to exclude those from being propagated through an intercept.
+Telepresence allows you to configure this through a ConfigMap; the variables listed there are read and removed from the intercepted environment.
+
+This can be done in two ways:
+
+When installing your traffic-manager through helm you can use the `--set` flag and pass a comma separated list of variables:
+
+`telepresence helm install --set intercept.environment.excluded="{DATABASE_PASSWORD,API_KEY}"`
+
+This also applies when upgrading:
+
+`telepresence helm upgrade --set intercept.environment.excluded="{DATABASE_PASSWORD,API_KEY}"`
+
+Once this is completed, the environment variables will no longer be in the environment file created by an intercept.
+
+The other way to complete this is in your custom `values.yaml`. Customizing your traffic-manager through a values file can be viewed [here](../install/manager.md).
+
+```yaml
+intercept:
+  environment:
+    excluded: ['DATABASE_PASSWORD', 'API_KEY']
+```
+
+You can exclude any number of variables; they just need to match the `key` of the variable within a pod to be excluded.
diff --git a/versioned_docs/version-2.22/reference/config.md b/versioned_docs/version-2.22/reference/config.md
new file mode 100644
index 00000000..1cb21cfc
--- /dev/null
+++ b/versioned_docs/version-2.22/reference/config.md
@@ -0,0 +1,342 @@
+---
+title: Laptop-side configuration
+---
+
+# Laptop-side configuration
+
+There are a number of configuration values that can be tweaked to change how Telepresence behaves.
+These can be set in three ways: globally, by a platform engineer with powers to deploy the Telepresence Traffic Manager, or locally by any user, either in the Telepresence configuration file `config.yml`, or as a Telepresence extension to the Kubernetes configuration.
+One important exception is the configuration of the traffic manager namespace, which, if it's different from the default of `ambassador`, [must be set](#manager) locally to be able to connect.
+
+## Global Configuration
+
+Global configuration is set at the Traffic Manager level and applies to any user connecting to that Traffic Manager.
+To set it, simply pass in a `client` dictionary to the `telepresence helm install` command, with any config values you wish to set.
+
+The `client` config supports values for [cluster](#cluster), [dns](#dns), [grpc](#grpc), [images](#images), [logLevels](#log-levels), [routing](#routing),
+and [timeouts](#timeouts).
+
+Here is an example configuration to show you the conventions of how Telepresence is configured:
+**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist**
+
+```yaml
+client:
+  timeouts:
+    agentInstall: 1m
+    intercept: 10s
+  logLevels:
+    userDaemon: debug
+  images:
+    registry: privateRepo # This overrides the default docker.io/datawire repo
+    agentImage: tel2:$version$ # This overrides the agent image to inject when engaging with a workload
+  grpc:
+    maxReceiveSize: 10Mi
+  dns:
+    includeSuffixes: [.private]
+    excludeSuffixes: [.se, .com, .io, .net, .org, .ru]
+    lookupTimeout: 30s
+  routing:
+    alsoProxySubnets:
+      - 1.2.3.4/32
+    neverProxySubnets:
+      - 1.2.3.4/32
+```
+
+### Cluster
+Values for `client.cluster` control aspects of the client's connection to the traffic-manager.
+
+| Field                     | Description                                                        | Type                                        | Default            |
+|---------------------------|--------------------------------------------------------------------|---------------------------------------------|--------------------|
+| `defaultManagerNamespace` | The default namespace where the Traffic Manager will be installed. | [string][yaml-str]                          | ambassador         |
+| `mappedNamespaces`        | Namespaces that will be mapped by default.                         | [sequence][yaml-seq] of [strings][yaml-str] | `[]`               |
+| `connectFromRootDaemon`   | Make connections to the cluster directly from the root daemon. 
 | [boolean][yaml-bool] | `true` |
+| `agentPortForward`        | Let telepresence-client use port-forwards directly to agents       | [boolean][yaml-bool]                        | `true`             |
+
+### DNS
+
+The `client.dns` configuration offers options for configuring the DNS resolution behavior in a client application or system. Here is a summary of the available fields:
+
+The fields for `client.dns` are: `localIP`, `excludeSuffixes`, `includeSuffixes`, `excludes`, `mappings`, and `lookupTimeout`.
+
+| Field             | Description                                                                                                                                                           | Type                                        | Default                                            |
+|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------|----------------------------------------------------|
+| `localIP`         | The address of the local DNS server. This entry is only used on Linux systems that are not configured to use systemd-resolved.                                        | IP address [string][yaml-str]               | first `nameserver` mentioned in `/etc/resolv.conf` |
+| `excludeSuffixes` | Suffixes for which the DNS resolver will always fail (or fallback in case of the overriding resolver). Can be globally configured in the Helm chart.                  | [sequence][yaml-seq] of [strings][yaml-str] | `[".arpa", ".com", ".io", ".net", ".org", ".ru"]`  |
+| `includeSuffixes` | Suffixes for which the DNS resolver will always attempt to do a lookup. Includes have higher priority than excludes. Can be globally configured in the Helm chart.    | [sequence][yaml-seq] of [strings][yaml-str] | `[]`                                               |
+| `excludes`        | Names to be excluded by the DNS resolver                                                                                                                              | [sequence][yaml-seq] of [strings][yaml-str] | `[]`                                               |
+| `mappings`        | Names to be resolved to other names (CNAME records) or to explicit IP addresses                                                                                       | [sequence][yaml-seq] of mappings            | `[]`                                               |
+| `lookupTimeout`   | Maximum time to wait for a cluster side host lookup. 
| [duration][go-duration] [string][yaml-str] | 4 seconds | + +Here is an example values.yaml: +```yaml +client: + dns: + includeSuffixes: [.private] + excludeSuffixes: [.se, .com, .io, .net, .org, .ru] + localIP: 8.8.8.8 + lookupTimeout: 30s +``` + +#### Mappings + +Allows you to map hostnames to aliases or to IP addresses. This is useful when you want to use an alternative name for a service in the cluster, or when you want the DNS resolver to map a name to an IP address of your choice. + +In the given cluster, the service named `postgres` is located within a separate namespace titled `big-data`, and it's referred to as `psql` : + +```yaml +dns: + mappings: + - name: postgres + aliasFor: psql.big-data + - name: my.own.domain + aliasFor: 192.168.0.15 +``` + +#### Exclude + +Lists service names to be excluded from the Telepresence DNS server. This is useful when you want your application to interact with a local service instead of a cluster service. In this example, "redis" will not be resolved by the cluster, but locally. + +```yaml +dns: + excludes: + - redis +``` + +### Grpc +The `maxReceiveSize` determines how large a message that the workstation receives via gRPC can be. The default is 4Mi (determined by gRPC). All traffic to and from the cluster is tunneled via gRPC. + +The size is measured in bytes. You can express it as a plain integer or as a fixed-point number using E, G, M, or K. You can also use the power-of-two equivalents: Gi, Mi, Ki. For example, the following represent roughly the same value: +``` +128974848, 129e6, 129M, 123Mi +``` + +### Images +Values for `client.images` are strings. These values affect the objects that are deployed in the cluster, +so it's important to ensure users have the same configuration. 
+
+These are the valid fields for the `client.images` key:
+
+| Field         | Description                                                                              | Type                                           | Default                             |
+|---------------|------------------------------------------------------------------------------------------|------------------------------------------------|-------------------------------------|
+| `registry`    | Docker registry to be used for installing the Traffic Manager and default Traffic Agent. | Docker registry name [string][yaml-str]        | `docker.io/datawire`                |
+| `agentImage`  | `$registry/$imageName:$imageTag` to use when installing the Traffic Agent.               | qualified Docker image name [string][yaml-str] | (unset)                             |
+| `clientImage` | `$registry/$imageName:$imageTag` to use locally when connecting with `--docker`.         | qualified Docker image name [string][yaml-str] | `$registry/ambassador-telepresence` |
+
+### Intercept
+
+The `intercept` configuration controls how Telepresence will intercept the communications to replaced containers and intercepted services.
+
+| Field                 | Description                                                                                                                                      | Type                | Default      |
+|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|---------------------|--------------|
+| `defaultPort`         | controls which port is selected when no `--port` flag is given to the `telepresence intercept` command.                                          | int                 | 8080         |
+| `useFtp`              | Use fuseftp instead of sshfs when mounting remote file systems                                                                                   | boolean             | false        |
+
+### Log Levels
+
+Values for the `client.logLevels` fields are one of the following strings,
+case-insensitive:
+
+- `trace`
+- `debug`
+- `info`
+- `warning` or `warn`
+- `error`
+
+For whichever log-level you select, you will get logs labeled with that level and of higher severity.
+(e.g. if you use `info`, you will also get logs labeled `error`. You will NOT get logs labeled `debug`.)
+
+These are the valid fields for the `client.logLevels` key:
+
+| Field        | Description                                                         | Type                                        | Default |
+|--------------|---------------------------------------------------------------------|---------------------------------------------|---------|
+| `userDaemon` | Logging level to be used by the User Daemon (logs to connector.log) | [loglevel][logrus-level] [string][yaml-str] | debug   |
+| `rootDaemon` | Logging level to be used for the Root Daemon (logs to daemon.log)   | [loglevel][logrus-level] [string][yaml-str] | info    |
+
+### Routing
+
+#### AlsoProxySubnets
+
+When using `alsoProxySubnets`, you provide a list of subnets to be added to the TUN device.
+All connections to addresses that the subnet spans will be dispatched to the cluster.
+
+Here is an example values.yaml for the subnet `1.2.3.4/32`:
+```yaml
+client:
+  routing:
+    alsoProxySubnets:
+      - 1.2.3.4/32
+```
+
+#### NeverProxySubnets
+
+When using `neverProxySubnets` you provide a list of subnets. These will never be routed via the TUN device,
+even if they fall within the subnets (pod or service) for the cluster. Instead, whatever route they have before
+telepresence connects is the route they will keep.
+
+Here is an example kubeconfig for the subnet `1.2.3.4/32`:
+
+```yaml
+client:
+  routing:
+    neverProxySubnets:
+      - 1.2.3.4/32
+```
+
+#### Using AlsoProxy together with NeverProxy
+
+Never proxy and also proxy are implemented as routing rules, meaning that when the two conflict, regular routing rules apply.
+Usually this means that the most specific route will win.
+
+So, for example, if an `alsoProxySubnets` subnet falls within a broader `neverProxySubnets` subnet:
+
+```yaml
+neverProxySubnets: [10.0.0.0/16]
+alsoProxySubnets: [10.0.5.0/24]
+```
+
+Then the specific `alsoProxySubnets` of `10.0.5.0/24` will be proxied by the TUN device, whereas the rest of `10.0.0.0/16` will not.
+ +Conversely, if a `neverProxySubnets` subnet is inside a larger `alsoProxySubnets` subnet: + +```yaml +alsoProxySubnets: [10.0.0.0/16] +neverProxySubnets: [10.0.5.0/24] +``` + +Then all of the `alsoProxySubnets` of `10.0.0.0/16` will be proxied, with the exception of the specific `neverProxySubnets` of `10.0.5.0/24` + +These are the valid fields for the `client.routing` key: + +| Field | Description | Type | Default | +|---------------------------|----------------------------------------------------------------------------------------|-------------------------|--------------------| +| `alsoProxySubnets` | Proxy these subnets in addition to the service and pod subnets | [CIDR][cidr] | | +| `neverProxySubnets` | Do not proxy these subnets | [CIDR][cidr] | | +| `allowConflictingSubnets` | Give Telepresence precedence when these subnets conflict with other network interfaces | [CIDR][cidr] | | +| `recursionBlockDuration` | Prevent recursion in VIF for this duration after a connect | [duration][go-duration] | | +| `virtualSubnet` | The CIDR to use when generating virtual IPs | [CIDR][cidr] | platform dependent | +| `autoResolveConflicts` | Auto resolve conflicts using a virtual subnet | [bool][yaml-bool] | true | + + +### Timeouts + +Values for `client.timeouts` are all durations either as a number of seconds +or as a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings +can be fractional (`1.5h`) or combined (`2h45m`). 
+ +These are the valid fields for the `timeouts` key: + +| Field | Description | Type | Default | +|-------------------------|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-----------------| +| `agentInstall` | Waiting for Traffic Agent to be installed | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | +| `apply` | Waiting for a Kubernetes manifest to be applied | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 1 minute | +| `clusterConnect` | Waiting for cluster to be connected | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | +| `connectivityCheck` | Timeout used when checking if cluster is already proxied on the workstation | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 500 ms (max 5s) | +| `endpointDial` | Waiting for a Dial to a service for which the IP is known | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 3 seconds | +| `roundtripLatency` | How much to add to the endpointDial timeout when establishing a remote connection | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 seconds | +| `intercept` | Waiting for an intercept to become active | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 30 seconds | +| `proxyDial` | Waiting for an outbound connection to be established | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | +| `trafficManagerConnect` | Waiting for the Traffic Manager API to connect for port forwards | 
 [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 60 seconds      |
+| `trafficManagerAPI`     | Waiting for connection to the gRPC API after `trafficManagerConnect` is successful | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 15 seconds      |
+| `helm`                  | Waiting for Helm operations (e.g. `install`) on the Traffic Manager                | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 30 seconds      |
+
+## Local Overrides
+
+In addition, it is possible to override each of these variables at the local level by setting up new values in local config files.
+There are two types of config values that can be set locally: those that apply to all clusters, which are set in a single `config.yml` file, and those
+that only apply to specific clusters, which are set as extensions to the `$KUBECONFIG` file.
+
+### Config for all clusters
+Telepresence uses a `config.yml` file to store and change those configuration values that will be used for all clusters you use Telepresence with.
+The location of this file varies based on your OS:
+
+* macOS: `$HOME/Library/Application Support/telepresence/config.yml`
+* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml`
+* Windows: `%APPDATA%\telepresence\config.yml`
+
+For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence.
+
+### Values
+
+The definitions of the values in the `config.yml` are identical to those values in the `client` config above, but without the top level `client` key.
+
+Here is an example configuration to show you the conventions of how Telepresence is configured:
+**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist**
+
+```yaml
+timeouts:
+  agentInstall: 1m
+  intercept: 10s
+logLevels:
+  userDaemon: debug
+images:
+  registry: privateRepo # This overrides the default docker.io/datawire repo
+  agentImage: tel2:$version$ # This overrides the agent image to inject when engaging with a workload
+grpc:
+  maxReceiveSize: 10Mi
+```
+
+
+## Workstation Per-Cluster Configuration
+
+Configuration that is specific to a cluster can also be overridden per-workstation by modifying your `$KUBECONFIG` file.
+It is recommended that you do not do this, and instead rely on upstream values provided to the Traffic Manager. This ensures
+that all users that connect to the Traffic Manager will behave the same.
+An important exception to this is the [`cluster.defaultManagerNamespace` configuration](#manager) which must be set locally.
+
+### Values
+
+The definitions of the values in the Telepresence kubeconfig extension are identical to those values in the `config.yml` config. The values will be merged into the config and have higher
+priority when Telepresence is connected to the extended cluster.
+
+Example kubeconfig:
+```yaml
+apiVersion: v1
+clusters:
+- cluster:
+    server: https://127.0.0.1
+    extensions:
+    - name: telepresence.io
+      extension:
+        cluster:
+          defaultManagerNamespace: staging
+        dns:
+          includeSuffixes: [.private]
+          excludeSuffixes: [.se, .com, .io, .net, .org, .ru]
+        routing:
+          neverProxy: [10.0.0.0/16]
+          alsoProxy: [10.0.5.0/24]
+  name: example-cluster
+```
+
+#### Manager
+
+This is the one cluster configuration that cannot be set using the Helm chart because it defines how Telepresence connects to
+the Traffic Manager. When it is not the default, that setting needs to be configured in the workstation's kubeconfig for the cluster.
+ +The `cluster.defaultManagerNamespace` key contains configuration for finding the `traffic-manager` that telepresence will connect to. + +Here is an example kubeconfig that will instruct telepresence to connect to a manager in namespace `staging`. The setting can be overridden using the Telepresence connect flag `--manager-namespace`. + +Please note that the `cluster.defaultManagerNamespace` can be set in the `config.yml` too, but will then not be unique per cluster. + +```yaml +apiVersion: v1 +clusters: + - cluster: + server: https://127.0.0.1 + extensions: + - name: telepresence.io + extension: + cluster: + defaultManagerNamespace: staging + name: example-cluster +``` + +[yaml-bool]: https://yaml.org/type/bool.html +[yaml-float]: https://yaml.org/type/float.html +[yaml-int]: https://yaml.org/type/int.html +[yaml-seq]: https://yaml.org/type/seq.html +[yaml-str]: https://yaml.org/type/str.html +[go-duration]: https://pkg.go.dev/time#ParseDuration +[logrus-level]: https://github.com/sirupsen/logrus/blob/v1.8.1/logrus.go#L25-L45 +[cidr]: https://www.geeksforgeeks.org/classless-inter-domain-routing-cidr/ diff --git a/versioned_docs/version-2.22/reference/dns.md b/versioned_docs/version-2.22/reference/dns.md new file mode 100644 index 00000000..64469844 --- /dev/null +++ b/versioned_docs/version-2.22/reference/dns.md @@ -0,0 +1,43 @@ +--- +title: DNS resolution +hide_table_of_contents: true +--- +# DNS resolution + +The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces currently managed by the Traffic Manager. Processes running locally on the desktop will have network access to all services in the currently connected namespace by service-name only, and to other managed namespaces using service-name.namespace. + +See this demonstrated below, using the [quick start's](../quick-start.md) sample app services. + +We'll connect to a namespace in the cluster and list the services that can be intercepted. 
+
+```
+$ telepresence connect --namespace default
+
+  Connecting to traffic manager...
+  Connected to context default, namespace default (https://<cluster public IP>)
+
+$ telepresence list
+
+  deployment web-app: ready to engage (traffic-agent not yet installed)
+  deployment emoji  : ready to engage (traffic-agent not yet installed)
+  deployment web    : ready to engage (traffic-agent not yet installed)
+
+$ curl web-app:80
+
+  <!DOCTYPE html>
+  <html>
+    <head>
+      <meta charset="UTF-8">
+      <title>Emoji Vote</title>
+  ...
+```
+
+The DNS resolver will also be able to resolve services using `<service-name>.<namespace>` regardless of what namespace the
+client is connected to as long as the given namespace is among the set managed by the Traffic Manager.
+
+### Supported Query Types
+
+The Telepresence DNS resolver is now capable of resolving queries of type `A`, `AAAA`, `CNAME`,
+`MX`, `NS`, `PTR`, `SRV`, and `TXT`.
+
+See [Outbound connectivity](routing.md#dns-resolution) for details on DNS lookups.
diff --git a/versioned_docs/version-2.22/reference/docker-run.md b/versioned_docs/version-2.22/reference/docker-run.md
new file mode 100644
index 00000000..e70595a9
--- /dev/null
+++ b/versioned_docs/version-2.22/reference/docker-run.md
@@ -0,0 +1,128 @@
+---
+title: Using Docker for intercepts
+description: How a Telepresence intercept can run a Docker container with configured environment and volume mounts.
+toc_min_heading_level: 2
+toc_max_heading_level: 2
+---
+
+# Using Docker when engaging with workloads
+
+## Using command flags
+
+### The docker flag
+You can start the Telepresence daemon in a Docker container on your laptop using the command:
+
+```console
+$ telepresence connect --docker
+```
+
+### The telepresence curl command
+
+The network interface that is added when connecting using `telepresence connect --docker` will not be accessible directly from the host computer. It is confined to the telepresence daemon container, and there you should not expect to be able to curl your cluster resources directly.
+ +You can use the `telepresence curl` command to curl your cluster resources. This command will run curl in a docker container that shares the network of the daemon container. + +### The telepresence docker-run command + +The `telepresence docker-run` command will start a container that automatically shares the daemon container network. It +will also circumvent Docker limitations that prevent containers that share another container's network to also make +ports available using `--publish`, `--expose`, or adding additional networks using `--network`. + +To achieve this, Telepresence temporarily adds the necessary network to the containerized daemon. This allows the new +container to join the same network. Additionally, Telepresence starts extra socat containers to handle port mappings, +ensuring that the desired ports are exposed to the local environment. + +> [!NOTE] +> If you use `telepresence docker-run` to run a command that lasts longer than the `telepresence connect --docker` that +> was in effect when it started, then it will lose its network. In other words, when using `telepresence docker-run`, +> you must always rerun after a `telepresence quit`/`telepresence connect --docker`. + +### The replace/ingest/intercept --docker-run flag + +If you want your replace, ingest, or intercept to use another Docker container, you can use the `--docker-run` flag. It will establish the engagement, +run your container in the foreground, then automatically end the engagement when the container exits. + +After establishing a connection to a cluster using `telepresence connect --docker`, the container started when using `--docker-run` will share +the same network as the containerized daemon that maintains the connection. This enables seamless communication between your local development +environment and the remote cluster. + +The `docker run` flags `--network`, `--publish`, or `--expose` are all available, just as with the `docker-run` command. 
+
+```console
+$ telepresence replace <workload> --container <container> --docker-run -- <docker-run-flags> <image>
+```
+OR
+```console
+$ telepresence ingest <workload> --container <container> --docker-run -- <docker-run-flags> <image>
+```
+OR
+```console
+$ telepresence intercept <workload> --port <port> --docker-run -- <docker-run-flags> <image>
+```
+
+The `--` separates flags intended for `telepresence replace/ingest/intercept` from flags intended for `docker run`.
+
+It's recommended that you always use the `--docker-run` in combination with a connection started with the `telepresence connect --docker`,
+because that makes everything less intrusive:
+
+- No admin user access is needed. Network modifications are confined to a Docker network.
+- There's no need for special filesystem mount software like MacFUSE or WinFSP. The volume mounts happen in the Docker engine.
+
+The following happens under the hood when both flags are in use:
+
+- The network for the replace, ingest, or intercept handler will be set to the same as the network used by the daemon. This guarantees that the
+  handler can access the Telepresence VIF, and hence have access to the cluster.
+- Volume mounts will be automatic and made using the Telemount Docker volume plugin so that all volumes exposed by the targeted
+  remote container are mounted on the local handler container.
+- The environment of the remote container becomes the environment of the local handler container.
+
+### The docker-build flag
+
+The `--docker-build <docker-context>` and the repeatable `--docker-build-opt key=value` flags enable containers to be built on the fly by the replace/ingest/intercept command.
+
+When using `--docker-build`, the image name used in the argument list must be verbatim `IMAGE`. The word acts as a placeholder and will be replaced by the ID of the image that is built.
+
+The `--docker-build` flag implies `--docker-run`.
+
+## Using docker-run flag without docker
+
+It is possible to use `--docker-run` with a daemon running on your host, which is the default behavior of Telepresence.
+
+However, it isn't recommended since you'll be in a hybrid mode: while your handler runs in a container, the daemon will modify the host network, and if remote mounts are desired, they may require extra software.
+
+The ability to use this special combination is retained for backward compatibility reasons. It might be removed in a future release of Telepresence.
+
+The `--port` flag has slightly different semantics and can be used in situations when the local and container port must be different. This
+is done using `--port <local-port>:<container-port>`. The container port will default to the local port when using the `--port <local-port>` syntax.
+
+## Examples
+
+Imagine you are working on a new version of your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container.
+
+```console
+$ telepresence connect --docker
+$ telepresence replace frontend-v1 --docker-run -- frontend-v2
+```
+
+Now, imagine that the `frontend-v2` image is built by a `Dockerfile` that resides in the directory `images/frontend-v2`. You can build and replace directly.
+
+```console
+$ telepresence replace frontend-v1 --docker-build images/frontend-v2 --docker-build-opt tag=mytag -- IMAGE
+```
+
+## Automatic flags
+
+Telepresence will automatically pass some relevant flags to Docker to connect the container with the remote container. Those flags are combined with the arguments given after `--` on the command line.
+ +- `--env-file ` Loads the remote environment +- `--name intercept--` Names the Docker container, this flag is omitted if explicitly given on the command line +- `-v ` Volume mount specification, see CLI help for `--docker-mount` flags for more info + +When used with a container based daemon: +- `--rm` Mandatory, because the volume mounts cannot be removed until the container is removed. +- `-v :` Volume mount specifications propagated from the engaged container +- `--network container:` Network is shared with the containerized daemon + +When used with a daemon that isn't container based: +- `--dns-search tel2-search` Enables single label name lookups in the connected namespace +- `-p ` The local port for the intercept and the container port diff --git a/versioned_docs/version-2.22/reference/engagements/cli.md b/versioned_docs/version-2.22/reference/engagements/cli.md new file mode 100644 index 00000000..b2f2bb59 --- /dev/null +++ b/versioned_docs/version-2.22/reference/engagements/cli.md @@ -0,0 +1,371 @@ +--- +title: Configure workload engagements using CLI +--- + +# Configuring workload engagements using CLI + +## Specifying a namespace for an engagement + +The namespace of the engaged workload is specified during connect using the `--namespace` option. + +```shell +telepresence connect --namespace myns +telepresence replace/ingest/intercept hello +``` + +## Importing environment variables + +Telepresence can import the environment variables from the pod that is +being engaged, see [this doc](../environment.md) for more details. + +## Creating an intercept + +The following command will intercept all traffic bound to the service and proxy it to your +laptop. This includes traffic coming through your ingress controller, so use this option +carefully as to not disrupt production environments. + +```shell +telepresence intercept --port= +``` + +Run `telepresence status` to see the list of active intercepts. 
+ +```console +$ telepresence status +OSS User Daemon: Running + Version : v2.18.0 + Executable : /usr/local/bin/telepresence + Install ID : 4b1658f3-7ff8-4af3-66693-f521bc1da32f + Status : Connected + Kubernetes server : https://cluster public IP> + Kubernetes context: default + Namespace : default + Manager namespace : ambassador + Intercepts : 1 total + dataprocessingnodeservice: @ +OSS Root Daemon: Running + Version: v2.18.0 + DNS : + Remote IP : 127.0.0.1 + Exclude suffixes: [.com .io .net .org .ru] + Include suffixes: [] + Timeout : 8s + Subnets: (2 subnets) + - 10.96.0.0/16 + - 10.244.0.0/24 +OSS Traffic Manager: Connected + Version : v2.19.0 + Traffic Agent: docker.io/datawire/tel2:2.18.0 +``` + +Finally, run `telepresence leave ` to stop the intercept. + +[kube-multi-port-services]: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + +```console +$ telepresence intercept --port=: +Using Deployment +intercepted + Intercept name : + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1: + Service Port Identifier: + Intercepting : all TCP connections +``` + +When intercepting a service that has multiple ports, the name of the +service port that has been intercepted is also listed. + +If you want to change which port has been intercepted, you can create +a new intercept the same way you did above, and it will change which +service port is being intercepted. + +## Creating an intercept when multiple services match your workload + +Oftentimes, there's a 1-to-1 relationship between a service and a +workload, so telepresence is able to auto-detect which service it +should intercept based on the workload you are trying to intercept. +But if you use something like +[Argo](https://www.getambassador.io/docs/argo/latest/), there may be +two services (that use the same labels) to manage traffic between a +canary and a stable service. 
+ +Fortunately, if you know which service you want to use when +intercepting a workload, you can use the `--service` flag. So in the +aforementioned example, if you wanted to use the `echo-stable` service +when intercepting your workload, your command would look like this: + +```console +$ telepresence intercept echo-rollout- --port --service echo-stable +Using ReplicaSet echo-rollout- +intercepted + Intercept name : echo-rollout- + State : ACTIVE + Workload kind : ReplicaSet + Destination : 127.0.0.1:3000 + Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 + Intercepting : all TCP connections +``` + +## Intercepting multiple ports + +It is possible to intercept more than one service and/or service port that are using the same workload. You do this +by repeating the `--port` flag. + +Let's assume that we have a service `multi-echo` with the two ports `http` and `grpc`. They are both +targeting the same `multi-echo` deployment. + +```console +$ telepresence intercept multi-echo-http --workload multi-echo --port 8080:http --port 8443:grpc +Using Deployment multi-echo +intercepted + Intercept name : multi-echo-http + State : ACTIVE + Workload kind : Deployment + Intercepting : 10.1.54.120 -> 127.0.0.1 + 8080 -> 8080 TCP + 8443 -> 8443 TCP + Volume Mount Point : /tmp/telfs-893700837 +``` + +## Port-forwarding an intercepted container's sidecars + +Sidecars are containers that sit in the same pod as an application +container; they usually provide auxiliary functionality to an +application, and can usually be reached at +`localhost:${SIDECAR_PORT}`. For example, a common use case for a +sidecar is to proxy requests to a database, your application would +connect to `localhost:${SIDECAR_PORT}`, and the sidecar would then +connect to the database, perhaps augmenting the connection with TLS or +authentication. 
+ +When intercepting a container that uses sidecars, you might want those +sidecars' ports to be available to your local application at +`localhost:${SIDECAR_PORT}`, exactly as they would be if running +in-cluster. Telepresence's `--to-pod ${PORT}` flag implements this +behavior, adding port-forwards for the port given. + +```console +$ telepresence intercept --port=: --to-pod= +Using Deployment +intercepted + Intercept name : + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1: + Service Port Identifier: + Intercepting : all TCP connections +``` + +If there are multiple ports that you need forwarded, simply repeat the +flag (`--to-pod= --to-pod=`). + +## Intercepting headless services + +Kubernetes supports creating [services without a ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services), +which, when they have a pod selector, serve to provide a DNS record that will directly point to the service's backing pods. +Telepresence supports intercepting these `headless` services as it would a regular service with a ClusterIP. 
+So, for example, if you have the following service: + +```yaml +--- +apiVersion: v1 +kind: Service +metadata: + name: my-headless +spec: + type: ClusterIP + clusterIP: None + selector: + service: my-headless + ports: + - port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: my-headless + labels: + service: my-headless +spec: + replicas: 1 + serviceName: my-headless + selector: + matchLabels: + service: my-headless + template: + metadata: + labels: + service: my-headless + spec: + containers: + - name: my-headless + image: jmalloc/echo-server + ports: + - containerPort: 8080 + resources: {} +``` + +You can intercept it like any other: + +```console +$ telepresence intercept my-headless --port 8080 +Using StatefulSet my-headless +intercepted + Intercept name : my-headless + State : ACTIVE + Workload kind : StatefulSet + Destination : 127.0.0.1:8080 + Volume Mount Point: /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-524189712 + Intercepting : all TCP connections +``` + +> [!IMPORTANT] +> This utilizes an `initContainer` that requires `NET_ADMIN` capabilities. +> If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. + +## Intercepting without a service + +You can intercept a workload without a service by adding an annotation that informs Telepresence which container +ports are eligible for intercepts. Telepresence will then inject a traffic-agent when the workload is +deployed, and you will be able to intercept the given ports as if they were service ports. The annotation is: + +```yaml + annotations: + telepresence.io/inject-container-ports: http +``` + +The annotation value is a comma separated list of port identifiers consisting of either the name or the port number of a container +port, optionally suffixed with `/TCP` or `/UDP`. + +### Let's try it out! + +1. 
Deploy an annotation similar to this one to your cluster: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: echo-no-svc + labels: + app: echo-no-svc + spec: + replicas: 1 + selector: + matchLabels: + app: echo-no-svc + template: + metadata: + labels: + app: echo-no-svc + annotations: + telepresence.io/inject-container-ports: http + spec: + automountServiceAccountToken: false + containers: + - name: echo-server + image: ghcr.io/telepresenceio/echo-server:latest + ports: + - name: http + containerPort: 8080 + env: + - name: PORT + value: "8080" + resources: + limits: + cpu: 50m + memory: 8Mi + ``` + +2. Connect telepresence: + + ```console + $ telepresence connect + Launching Telepresence User Daemon + Launching Telepresence Root Daemon + Connected to context kind-dev, namespace default (https://127.0.0.1:36767) + ``` + +3. List your intercept eligible workloads. If the annotation is correct, the deployment should show up in the list: + + ```console + $ telepresence list + deployment echo-no-svc: ready to engage (traffic-agent not yet installed) + ``` + +4. Start an intercept handler locally that will receive the incoming traffic. Here's an example using a simple python http service: + + ```console + $ python3 -m http.server 8080 + ``` + +5. Create an intercept: + + ```console + $ telepresence intercept echo-no-svc + Using Deployment echo-no-svc + Intercept name : echo-no-svc + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1:8080 + Volume Mount Point: /tmp/telfs-3306285526 + Intercepting : all TCP connections + Address : 10.244.0.13:8080 + ``` + +Note that the response contains an "Address" that you can curl to reach the intercepted pod. You will not be able to +curl the name "echo-no-svc". Since there's no service by that name, there's no DNS entry for it either. + +6. 
Curl the intercepted workload: + + ```console + $ curl 10.244.0.13:8080 + <output from your local service> + ``` + +> [!IMPORTANT] +> A service-less intercept utilizes an `initContainer` that requires `NET_ADMIN` capabilities. +> If your cluster administrator has disabled them, you will only be able to intercept services using symbolic target ports. + +## Specifying the intercept traffic target + +By default, it's assumed that your local app is reachable on `127.0.0.1`, and intercepted traffic will be sent to that IP +at the port given by `--port`. If you wish to change this behavior and send traffic to a different IP address, you can use the `--address` parameter +to `telepresence intercept`. Say your machine is configured to respond to HTTP requests for an intercept on `172.16.0.19:8080`. You would run this as: + +```console +$ telepresence intercept my-service --address 172.16.0.19 --port 8080 +Using Deployment my-service + Intercept name : my-service + State : ACTIVE + Workload kind : Deployment + Destination : 172.16.0.19:8080 + Service Port Identifier: proxied + Volume Mount Point : /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-517018422 + Intercepting : all TCP connections +``` + +## Replacing a running workload + +By default, your application container continues to run while Telepresence intercepts its traffic. This can cause issues +for applications with ongoing background activities, such as consuming from a message queue. + +To address this, the `telepresence intercept` command provides the `--replace` flag. When used, the Traffic Agent +replaces the application container within the pod. This ensures that the application itself is not running and avoids +unintended side effects. The original application container is automatically restored once the intercept session ends. 
+ +```console +$ telepresence intercept my-service --port 8080 --replace + Intercept name : my-service + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1:8080 + Service Port Identifier: proxied + Volume Mount Point : /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-517018422 + Intercepting : all TCP connections +``` + +> [!NOTE] +> Sidecars will not be stopped. Only the targeted container will be removed from the pod. diff --git a/versioned_docs/version-2.22/reference/engagements/container.md b/versioned_docs/version-2.22/reference/engagements/container.md new file mode 100644 index 00000000..ee213eed --- /dev/null +++ b/versioned_docs/version-2.22/reference/engagements/container.md @@ -0,0 +1,48 @@ +--- +title: Target a specific container +--- + +# Target a specific container +A `telepresence replace` or `telepresence ingest` will always target a specific container, and the `--container` flag is +mandatory when the workload has more than one container. + +A `telepresence intercept` will ultimately target a specific port within a container. The port is usually determined +by examining the relationship between the service's `targetPort` and the container's `containerPort`. + +In certain scenarios, the container owning the intercepted port differs from the container the intercept +targets. This container's sole purpose is to route traffic from the service to the intended container, +often using a direct localhost connection. Use the `--container` flag with the intercept in these scenarios. + +## No intercept + +Consider the following scenario: + +![no-intercept](../../images/secondary-no-intercept.png) + +## Standard Intercept + +During a replace, the Telepresence traffic-agent will redirect all traffic intended for the replaced container to the +workstation. It will also make the environment and mounts for the **Nginx container** available, because it is +considered to be the one targeted by the replace. 
+ +During an intercept, the Telepresence traffic-agent will redirect the `http` port to the workstation. +It will also make the environment and mounts for the **Nginx container** available, because it is +considered to be the one targeted by the intercept. + +```console +$ telepresence intercept myservice --port http +``` + +![normal-intercept](../../images/secondary-normal-intercept.png) + +## Intercept With --container + +The `--container ` intercept flag is useful when the objective is to work with the App container +locally. While this option doesn't influence the port selection, it guarantees that the environment +variables and mounts propagated to the workstation originate from the specified container. + +```console +$ telepresence intercept myservice --port http --container app +``` + +![container-intercept](../../images/secondary-container-intercept.png) diff --git a/versioned_docs/version-2.22/reference/engagements/sidecar.md b/versioned_docs/version-2.22/reference/engagements/sidecar.md new file mode 100644 index 00000000..0d96e90b --- /dev/null +++ b/versioned_docs/version-2.22/reference/engagements/sidecar.md @@ -0,0 +1,74 @@ +--- +title: Traffic Agent Sidecar +--- +# Traffic Agent Sidecar + +When replacing a container or intercepting a service, the Telepresence Traffic Manager ensures +that a Traffic Agent has been injected into the targeted workload. +The injection is triggered by a Kubernetes Mutating Webhook and will +only happen once. The Traffic Agent is responsible for making the environment and volumes available +on the developer's workstation, and also for redirecting traffic to it. + +When replacing a workload container, all traffic intended for it will be rerouted to the local workstation, unless +limited using the `--port` flag. + +When intercepting, all `tcp` and/or `udp` traffic to the targeted port is sent to the developer's workstation. + +This means that both a `replace` and an `intercept` will affect all users of the targeted workload. 
+ +## Supported workloads + +Kubernetes has various +[workloads](https://kubernetes.io/docs/concepts/workloads/). +Currently, Telepresence supports installing a +Traffic Agent container on `Deployments`, `ReplicaSets`, `StatefulSets`, and `ArgoRollouts`. A Traffic Agent is +installed the first time a user makes a `telepresence replace WORKLOAD`, `telepresence ingest WORKLOAD`, +`telepresence intercept WORKLOAD`, or a `telepresence connect --proxy-via CIDR=WORKLOAD`. + +A Traffic Agent may also be installed up front by adding a `telepresence.io/inject-traffic-agent: enabled` +annotation to the workload's pod template. + +### Sidecar injection + +The actual installation of the Traffic Agent is performed by a mutating admission webhook that calls the agent-injector +service in the Traffic Manager's namespace. + +The configuration for the sidecar, which is automatically generated, resides in the configmap `telepresence-agents`. + +### Uninstalling the Traffic Agent + +A Traffic Agent will normally remain in the workload's pods once it has been installed. It can be explicitly removed by +issuing the command `telepresence uninstall WORKLOAD`. It will also be removed if its configuration is removed +from the `telepresence-agents` configmap. + +Removing the `telepresence-agents` configmap will effectively uninstall all injected Traffic Agents from the same +namespace. + +> [!NOTE] +> Uninstalling will not work if the Traffic Agent is installed using the pod template annotation. + +### Disable Traffic Agent in a workload + +The Traffic Agent installation can be completely disabled by adding a `telepresence.io/inject-traffic-agent: disabled` +annotation to the workload's pod template. This will prevent all attempts to do anything with the workload that will +require a Traffic Agent. + +### Disable workloads + +By default, traffic-manager will observe `Deployments`, `ReplicaSets` and `StatefulSets`. +Each observed workload type adds certain overhead. 
If you are not engaging a specific workload type, you can disable it to reduce that overhead. +That can be achieved by setting the Helm chart value `workloads.<workload type>.enabled=false` when installing the traffic-manager. +The following are the Helm chart values to disable the workload types: + +- `workloads.deployments.enabled=false` for `Deployments`, +- `workloads.replicaSets.enabled=false` for `ReplicaSets`, +- `workloads.statefulSets.enabled=false` for `StatefulSets`. + +### Enable ArgoRollouts + +In order to use `ArgoRollouts`, you must pass the Helm chart value `workloads.argoRollouts.enabled=true` when installing the traffic-manager. +It is recommended to set the pod template annotation `telepresence.io/inject-traffic-agent: enabled` to avoid creation of unwanted +revisions. + +> [!NOTE] +> While many of our examples use Deployments, they would also work on other supported workload types. diff --git a/versioned_docs/version-2.22/reference/environment.md b/versioned_docs/version-2.22/reference/environment.md new file mode 100644 index 00000000..f6418439 --- /dev/null +++ b/versioned_docs/version-2.22/reference/environment.md @@ -0,0 +1,49 @@ +--- +title: Environment variables +description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." +hide_table_of_contents: true +--- + +# Environment variables + +Telepresence will import environment variables from the cluster container when engaging with it. +You can use these variables with the code running on your laptop. + +There are several options available to do this: + +1. `telepresence replace <workload> --container <container> --env-file <file>` + + This will write the environment variables to a file. This file can be used when starting containers locally. The option `--env-syntax` + will allow control over the syntax of the file. Valid syntaxes are "docker", "compose", "sh", "csh", "cmd", and "ps" where "sh", "csh", + and "ps" can be suffixed with ":export". + +2. 
`telepresence replace --container --env-file --env-syntax=json` + + This will write the environment variables to a JSON file. This file can be injected into other build processes. + +3. `telepresence replace --container -- ` + + This will run a command locally with the pod's environment variables set on your laptop. Once the command quits the `replace` is stopped (as if `telepresence leave ` was run). This can be used in conjunction with a local server command, such as `python [FILENAME]` or `node [FILENAME]` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. + + Another use would be running a subshell, Bash for example: + +4. `telepresence replace -- /bin/bash` + + This would start the `replace` and then launch the subshell on your laptop with all the same variables set as on the pod. + +5. `telepresence replace --docker-run -- ` + + This will ensure that the environment is propagated to the container. Will also work for `--docker-build` and `--docker-debug`. + +## Telepresence Environment Variables + +Telepresence adds some useful environment variables in addition to the ones imported from the engaged container: + +### TELEPRESENCE_ROOT +Directory where all remote volumes mounts are rooted. See [Volume Mounts](volume.md) for more info. + +### TELEPRESENCE_MOUNTS +Colon separated list of remotely mounted directories. + +### TELEPRESENCE_CONTAINER +The name of the targeted container. Useful when a pod has several containers, and you want to know which one that was engaged by Telepresence. 
diff --git a/versioned_docs/version-2.22/reference/inside-container.md b/versioned_docs/version-2.22/reference/inside-container.md new file mode 100644 index 00000000..8059b940 --- /dev/null +++ b/versioned_docs/version-2.22/reference/inside-container.md @@ -0,0 +1,38 @@ +--- +title: Running Telepresence inside a container +hide_table_of_contents: true +--- +# Running Telepresence inside a container + +## Run with the daemon and engagement handler in containers + +The `telepresence connect` command now has the option `--docker`. This option tells telepresence to start the Telepresence daemon in a +docker container. + +Running the daemon in a container brings many advantages. The daemon will no longer make modifications to the host's network or DNS, and +it will not mount files in the host's filesystem. Consequently, it will not need admin privileges to run, nor will it need special software +like macFUSE or WinFSP to mount the remote file systems. + +The engagement handler (the process that runs locally and optionally will receive intercepted traffic) must also be a docker container, +because that is the only way to access the cluster network that the daemon makes available, and to mount the docker volumes needed. + +## Run everything in a container + +Environments like [GitHub Codespaces](https://docs.github.com/en/codespaces/overview) run everything in a container: your shell, the +telepresence CLI, and both its daemons. This means that the container must be configured so that it allows Telepresence to set up its +Virtual Network Interface before you issue a `telepresence connect`. + +There are several conditions that must be met. 
+ +- Access to the `/dev/net/tun` device +- The `NET_ADMIN` capability +- If you're using IPv6, then you also need sysctl `net.ipv6.conf.all.disable_ipv6=0` + +The Codespaces `devcontainer.json` will typically need to include: + +```json + "runArgs": [ + "--privileged", + "--cap-add=NET_ADMIN", + ], +``` diff --git a/versioned_docs/version-2.22/reference/monitoring.md b/versioned_docs/version-2.22/reference/monitoring.md new file mode 100644 index 00000000..b91a55cf --- /dev/null +++ b/versioned_docs/version-2.22/reference/monitoring.md @@ -0,0 +1,432 @@ +--- +title: Monitoring +--- + +# Monitoring + +Telepresence offers powerful monitoring capabilities to help you keep a close eye on your telepresence activities and traffic manager metrics. + +## Prometheus Integration + +One of the key features of Telepresence is its seamless integration with Prometheus, which allows you to access real-time metrics and gain insights into your system's performance. With Prometheus, you can monitor various aspects of your traffic manager, including the number of active intercepts and users. Additionally, you can track consumption-related information, such as the number of intercepts used by your developers and how long they stayed connected. + +To enable Prometheus metrics for your traffic manager, follow these steps: + +1. **Configure Prometheus Port** + + First, you'll need to specify the Prometheus port by setting a new environment variable called `PROMETHEUS_PORT` for your traffic manager. You can do this by running the following command: + + ```shell + telepresence helm upgrade --set-string prometheus.port=9090 + ``` + +2. **Validate the Prometheus Exposure** + + After configuring the Prometheus port, you can validate its exposure by port-forwarding the port using Kubernetes: + + ```shell + kubectl port-forward deploy/traffic-manager 9090:9090 -n ambassador + ``` + +3. 
**Access Prometheus Dashboard** + + Once the port-forwarding is set up, you can access the Prometheus dashboard by navigating to `http://localhost:9090` in your web browser: + + Here, you will find a wealth of built-in metrics, as well as custom metrics (see below) that we have added to enhance your tracking capabilities. + + | **Name** | **Type** | **Description** | **Labels** | + |-----------------------------|----------|-------------------------------------------------------------------------------|------------------------------------------| + | `agent_count` | Gauge | Number of connected traffic agents. | | + | `client_count` | Gauge | Number of connected clients. | | + | `active_intercept_count` | Gauge | Number of active intercepts. | | + | `session_count` | Gauge | Number of sessions. | | + | `tunnel_count` | Gauge | Number of tunnels. | | + | `tunnel_ingress_bytes` | Counter | Number of bytes tunnelled from clients. | | + | `tunnel_egress_bytes` | Counter | Number of bytes tunnelled to clients. | | + | `active_http_request_count` | Gauge | Number of currently served HTTP requests. | | + | `active_grpc_request_count` | Gauge | Number of currently served gRPC requests. | | + | `connect_count` | Counter | The total number of connects by user. | `client`, `install_id` | + | `connect_active_status` | Gauge | Flag to indicate when a connect is active. 1 for active, 0 for not active. | `client`, `install_id` | + | `intercept_count` | Counter | The total number of intercepts by user. | `client`, `install_id`, `intercept_type` | + | `intercept_active_status` | Gauge | Flag to indicate when an intercept is active. 1 for active, 0 for not active. | `client`, `install_id`, `workload` | + +4. **Enable Scraping for Traffic Manager Metrics** + To ensure that these metrics are collected regularly by your Prometheus server and to maintain a historical record, it's essential to enable scraping. 
If you're using the default Prometheus configuration, you can achieve this by specifying specific pod annotations as follows: + + ```yaml + template: + metadata: + annotations: + prometheus.io/path: / + prometheus.io/port: "9090" + prometheus.io/scrape: "true" + ``` + + These annotations instruct Prometheus to scrape metrics from the Traffic Manager pod, allowing you to track consumption metrics and other important data over time. + +## Grafana Integration + +Grafana plays a crucial role in enhancing Telepresence's monitoring capabilities. While the step-by-step instructions for Grafana integration are not included in this documentation, you have the option to explore the integration process. By doing so, you can create visually appealing and interactive dashboards that provide deeper insights into your telepresence activities and traffic manager metrics. + +Moreover, we've developed a dedicated Grafana dashboard for your convenience. Below, you can find sample screenshots of the dashboard, and you can access the JSON model for configuration: + +**JSON Model:** + +This dashboard is designed to provide you with comprehensive monitoring and visualization tools to effectively manage your Telepresence environment. 
+ +```json +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "barchart", + "name": "Bar chart", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.5" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 5, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "agent_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" 
+ } + ], + "title": "Number of connected traffic agents", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 6, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "client_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Number of connected clients", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 7, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "active_intercept_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + 
"refId": "A" + } + ], + "title": "Number of active intercepts", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "session_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Number of sessions", + "type": "stat" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30d", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Telepresence", + "uid": "d99c884a-8f4f-43f8-bd4e-bd68e47f100d", + "version": 5, + "weekStart": "" +} +``` diff --git a/versioned_docs/version-2.22/reference/rbac.md b/versioned_docs/version-2.22/reference/rbac.md new file mode 100644 index 00000000..5d92eefd --- /dev/null +++ b/versioned_docs/version-2.22/reference/rbac.md @@ -0,0 +1,267 @@ +--- +title: RBAC +toc_min_heading_level: 2 +toc_max_heading_level: 2 +--- + +# Telepresence RBAC +The intention of this document is to provide a template for securing and limiting the permissions of Telepresence. +This documentation covers the full extent of permissions necessary to administrate Telepresence components in a cluster. 
+ +There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described below. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [engagement](../howtos/engage.md), and otherwise be unable to affect Kubernetes resources. + +In addition to the above, there is also a consideration of how to manage Users and Groups in Kubernetes which is outside the scope of this document. This document will use Service Accounts to assign Roles and Bindings. Other methods of RBAC administration and enforcement can be found on the [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) page. + +## Requirements + +- Kubernetes version 1.16+ +- Cluster admin privileges to apply RBAC + +## Editing your kubeconfig + +This guide also assumes that you are utilizing a kubeconfig file that is specified by the `KUBECONFIG` environment variable. This is a `yaml` file that contains the cluster's API endpoint information as well as the user data being supplied for authentication. The Service Account name used in the example below is called tp-user. This can be replaced by any value (e.g. John or Jane) as long as references to the Service Account are consistent throughout the `yaml`. After an administrator has applied the RBAC configuration, a user should create a `config.yaml` in your current directory that looks like the following: + +```yaml +apiVersion: v1 +kind: Config +clusters: +- name: my-cluster # Must match the cluster value in the contexts config + cluster: + ## The cluster field is highly cloud-dependent. 
+contexts:
+- name: my-context
+  context:
+    cluster: my-cluster # Must match the name field in the clusters config
+    user: tp-user
+users:
+- name: tp-user # Must match the name of the Service Account created by the cluster admin
+  user:
+    token: # See note below
+```
+
+The Service Account token will be obtained by the cluster administrator after they create the user's Service Account. Creating the Service Account will create an associated Secret in the same namespace with the format `<service-account-name>-token-<unique-id>`. This token can be obtained by your cluster administrator by running `kubectl get secret <secret-name> -n ambassador -o jsonpath='{.data.token}' | base64 -d`.
+
+After creating `config.yaml` in your current directory, export the file's location to KUBECONFIG by running `export KUBECONFIG=$(pwd)/config.yaml`. You should then be able to switch to this context by running `kubectl config use-context my-context`.
+
+## Administrating Telepresence
+
+Telepresence administration requires permissions for creating the `traffic-manager` [deployment](architecture.md#traffic-manager) which is typically
+done by a full cluster administrator.
+
+Once installed, the Telepresence Traffic Manager will run using the `traffic-manager` ServiceAccount. This account is
+set up differently depending on if the manager is installed using a dynamic or a static namespace selector.
+
+### Installation without, or with dynamic, namespace selection
+
+The Traffic Manager will require cluster wide access to several resources when it lacks a namespace selector, or when it
+is configured with a dynamic namespace selector.
+
+### Traffic Manager Permissions
+
+These are the permissions required by the `traffic-manager` account in such a configuration:
+
+```yaml
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: traffic-manager
+  namespace: ambassador
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: traffic-manager
+rules:
+  - apiGroups: ["apps"]
+    resources: ["deployments", "replicasets", "statefulsets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: ["events.k8s.io"]
+    resources: ["events"]
+    verbs: ["get", "watch"]
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list", "watch"] # patch not needed when agentInjector.enabled is set to false
+
+  # If argoRollouts.enabled is set to true
+  - apiGroups: ["argoproj.io"]
+    resources: ["rollouts"]
+    verbs: ["get", "list", "watch"]
+
+  # When using podCIDRStrategy nodePodCIDRs
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+
+  # The following is not needed when agentInjector.enabled is set to false
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["patch"]
+  - apiGroups: ["apps"]
+    resources: ["deployments", "replicasets", "statefulsets"]
+    verbs: ["patch"]
+  # If argoRollouts.enabled is set to true
+  - apiGroups: ["argoproj.io"]
+    resources: ["rollouts"]
+    verbs: ["patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: traffic-manager
+subjects:
+  - name: traffic-manager
+    kind: ServiceAccount
+    namespace: ambassador
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  name: traffic-manager
+  kind: ClusterRole
+```
+
+
+### Installation with static namespace selection
+
+The permissions required by the `traffic-manager` account in a statically namespaced configuration are very similar to
+the ones used in a dynamic configuration, but a `Role`/`RoleBinding` will be installed in each managed namespace instead
+of the
`ClusterRole`/`ClusterRoleBinding` pair. + +## Telepresence Client Access + +A Telepresence client requires just a small set of RBAC permissions. The bare minimum to connect is the ability to +create a port-forward to the traffic-manager. + +The following configuration assumes that a ServiceAccount "tp-user" has been created in the traffic-manager's default +"ambassador" namespace. + +In order to connect, the client must resolve the traffic-manager service name into a pod-IP and set up a port-forward. +This requires the following Role/RoleBinding in the Traffic Manager's namespace. + +```yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: traffic-manager-connect + namespace: ambassador +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["services"] + resourceNames: ["traffic-manager"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/portforward"] + verbs: ["create"] +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: traffic-manager-connect + namespace: ambassador +subjects: + - kind: ServiceAccount + name: tp-user + namespace: ambassador +roleRef: + kind: Role + name: traffic-manager-connect + apiGroup: rbac.authorization.k8s.io +``` + +Once connected, it is desirable, but not necessary that the client can create port-forwards directly to Traffic Agents +in the namespace that it is connected to. The lack of this permission will cause all traffic to be routed via the +Traffic Manager, which will have a slightly negative impact on throughput. 
+ +It's recommended that the client also has the following permissions in a dynamic namespaces installation: + +```yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: telepresence-ambassador +rules: +- apiGroups: + - "" + resources: ["namespaces"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + + # Necessary if the client should be able to gather the pod logs +- apiGroups: [""] + resources: ["pods"] + verbs: ["list"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + + # All traffic will be routed via the traffic-manager unless a portforward can be created directly to a pod +- apiGroups: [""] + resources: ["pods/portforward"] + verbs: ["create"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: telepresence-ambassador +subjects: + - kind: ServiceAccount + name: tp-user + namespace: ambassador +roleRef: + kind: ClusterRole + name: telepresence-ambassador + apiGroup: rbac.authorization.k8s.io +``` + +The corresponding configuration for a static namespace installation, for each namespaece that the client should be able +to access: + + +```yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: telepresence-client +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + + # Necessary if the client should be able to gather the pod logs +- apiGroups: [""] + resources: ["pods"] + verbs: ["list"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + + # All traffic will be routed via the traffic-manager unless a portforward can be created directly to a pod +- apiGroups: [""] + resources: ["pods/portforward"] + verbs: ["create"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: telepresence-client +subjects: + - kind: ServiceAccount + name: tp-user + namespace: ambassador +roleRef: + kind: Role + name: telepresence-client + apiGroup: rbac.authorization.k8s.io 
+``` + +The user will also need the [Traffic Manager connect permission](#traffic-manager-permissions) described above. diff --git a/versioned_docs/version-2.22/reference/routing.md b/versioned_docs/version-2.22/reference/routing.md new file mode 100644 index 00000000..ec48e7ee --- /dev/null +++ b/versioned_docs/version-2.22/reference/routing.md @@ -0,0 +1,57 @@ +--- +title: Connection Routing +toc_min_heading_level: 2 +toc_max_heading_level: 2 +--- + +# Connection Routing + +## DNS resolution +When requesting a connection to a host, the IP of that host must be determined. Telepresence provides DNS resolvers to help with this task. There are currently four types of resolvers but only one of them will be used on a workstation at any given time. Common for all of them is that they will propagate a selection of the host lookups to be performed in the cluster. The selection normally includes all names ending with `.cluster.local` or a currently mapped namespace but more entries can be added to the list using the `includeSuffixes` or `mappings` option in the +[cluster DNS configuration](config.md#dns) + +### Cluster side DNS lookups +The cluster side host lookup will be performed by a traffic-agent in the connected namespace, or by the traffic-manager if no such agent exists. + +### macOS resolver +This resolver hooks into the macOS DNS system by creating files under `/etc/resolver`. Those files correspond to some domain and contain the port number of the Telepresence resolver. Telepresence creates one such file for each of the currently mapped namespaces and `include-suffixes` option. The file `telepresence.local` contains a search path that is configured based on currently connected namespace so that single label names can be resolved correctly. 
+ +### Linux systemd-resolved resolver +This resolver registers itself as part of telepresence's [VIF](tun-device.md) using `systemd-resolved` and uses the DBus API to configure domains and routes that corresponds to the connected namespace and the namespaces managed by the Traffic Manager. + +### Linux overriding resolver +Linux systems that aren't configured with `systemd-resolved` will use this resolver. A Typical case is when running Telepresence [inside a docker container](inside-container.md). During initialization, the resolver will first establish a _fallback_ connection to the IP passed as `--dns`, the one configured as `local-ip` in the [local DNS configuration](config.md#dns), or the primary `nameserver` registered in `/etc/resolv.conf`. It will then use iptables to actually override that IP so that requests to it instead end up in the overriding resolver, which unless it succeeds on its own, will use the _fallback_. + +### Windows resolver +This resolver uses the DNS resolution capabilities of the [win-tun](https://www.wintun.net/) device in conjunction with [Win32_NetworkAdapterConfiguration SetDNSDomain](https://docs.microsoft.com/en-us/powershell/scripting/samples/performing-networking-tasks?view=powershell-7.2#assigning-the-dns-domain-for-a-network-adapter). + +### DNS caching +The Telepresence DNS resolver often changes its configuration. Telepresence will not flush the host's DNS caches. Instead, all records will have a short Time To Live (TTL) so that such caches evict the entries quickly. This causes increased load on the Telepresence resolver (shorter TTL means more frequent queries) and to cater for that, telepresence now has an internal cache to minimize the number of DNS queries that it sends to the cluster. This cache is flushed as needed without causing instabilities. + +## Routing + +### Subnets +The Telepresence `traffic-manager` service is responsible for discovering the cluster's service subnet and all subnets used by the pods. 
In order to do this, it needs permission to create a dummy service[^1] in its own namespace, and the ability to list, get, and watch nodes and pods. Most clusters will expose the pod subnets as `podCIDR` in the `Node` while others, like Amazon EKS, don't. Telepresence will then fall back to deriving the subnets from the IPs of all pods. If you'd like to choose a specific method for discovering subnets, or want to provide the list yourself, you can use the `podCIDRStrategy` configuration value in the [helm](../install/manager.md) chart to do that.
+
+The complete set of subnets that the [VIF](tun-device.md) will be configured with is dynamic and may change during a connection's life cycle as new nodes arrive or disappear from the cluster. The set consists of what the traffic-manager finds in the cluster, and the subnets configured using the [also-proxy](config.md#alsoproxysubnets) configuration option. Telepresence will remove subnets that are equal to, or completely covered by, other subnets.
+
+### Connection origin
+A request to connect to an IP-address that belongs to one of the subnets of the [VIF](tun-device.md) will cause a connection request to be made in the cluster. As with host name lookups, the request will originate from a traffic-agent in the connected namespace, or by the traffic-manager when no agent is present.
+
+There are multiple reasons for doing this. One is that it is important that the request originates from the correct namespace. Example:
+
+```bash
+curl some-host
+```
+results in an HTTP request with header `Host: some-host`. Now, if a service-mesh like Istio performs header-based routing, then it will fail to find that host unless the request originates from the same namespace as the host resides in. Another reason is that the configuration of a service mesh can contain very strict rules. If the request then originates from the wrong pod, it will be denied.
+ +## Recursion detection +It is common that clusters used in development, such as Minikube, Minishift or k3s, run on the same host as the Telepresence client, often in a Docker container. Such clusters may have access to host network, which means that both DNS and L4 routing may be subjected to recursion. + +### DNS recursion +When a local cluster's DNS-resolver fails to resolve a hostname, it may fall back to querying the local host network. This means that the Telepresence resolver will be asked to resolve a query that was issued from the cluster. Telepresence must check if such a query is recursive because there is a chance that it actually originated from the Telepresence DNS resolver and was dispatched to the `traffic-manager`, or a `traffic-agent`. + +Telepresence handles this by sending one initial DNS-query to resolve the hostname "tel2-recursion-check.kube-system". If the cluster runs locally, and has access to the local host's network, then that query will recurse back into the Telepresence resolver. Telepresence remembers this and alters its own behavior so that queries that are believed to be recursions are detected and respond with an NXNAME record. Telepresence performs this solution to the best of its ability, but may not be completely accurate in all situations. There's a chance that the DNS-resolver will yield a false negative for the second query if the same hostname is queried more than once in rapid succession, that is when the second query is made before the first query has received a response from the cluster. + +##### Footnotes: +[^1]: The error message from an attempt to create a service in a bad subnet contains the service subnet. The trick of creating a dummy service is currently the only way to get Kubernetes to expose that subnet. 
diff --git a/versioned_docs/version-2.22/reference/tun-device.md b/versioned_docs/version-2.22/reference/tun-device.md new file mode 100644 index 00000000..6bd8b29d --- /dev/null +++ b/versioned_docs/version-2.22/reference/tun-device.md @@ -0,0 +1,32 @@ +--- +title: Networking through Virtual Network Interface +hide_table_of_contents: true +--- + +# Networking through Virtual Network Interface + +The Telepresence daemon process creates a Virtual Network Interface (VIF) when Telepresence connects to the cluster. The VIF ensures that the cluster's subnets are available to the workstation. It also intercepts DNS requests and forwards them to the traffic-manager which in turn forwards them to engaged agents, if any, or performs a host lookup by itself. + +### TUN-Device +The VIF is a TUN-device, which means that it communicates with the workstation in terms of L3 IP-packets. The router will recognize UDP and TCP packets and tunnel their payload to the traffic-manager via its encrypted gRPC API. The traffic-manager will then establish corresponding connections in the cluster. All protocol negotiation takes place in the client because the VIF takes care of the L3 to L4 translation (i.e. the tunnel is L4, not L3). + +## Gains when using the VIF + +### Both TCP and UDP +The TUN-device is capable of routing both TCP and UDP traffic. + +### No SSH required + +The VIF approach is somewhat similar to using `sshuttle` but without +any requirements for extra software, configuration or connections. +Using the VIF means that only one single connection needs to be +forwarded through the Kubernetes apiserver (à la `kubectl +port-forward`), using only one single port. There is no need for +`ssh` in the client nor for `sshd` in the traffic-manager. This also +means that the traffic-manager container can run as the default user. 
+
+#### sshfs without ssh encryption
+When a pod is engaged, and its volumes are mounted on the local machine, this mount is performed by [sshfs](https://github.com/libfuse/sshfs). Telepresence will run `sshfs -o slave` which means that instead of using `ssh` to establish an encrypted communication to an `sshd`, which in turn terminates the encryption and forwards to `sftp`, the `sshfs` will talk `sftp` directly on its `stdin/stdout` pair. Telepresence tunnels that directly to an `sftp` in the agent using its already encrypted gRPC API. As a result, no `sshd` is needed in the client or in the traffic-agent, and the traffic-agent container can run as the default user.
+
+### No Firewall rules
+With the VIF in place, there's no longer any need to tamper with firewalls in order to establish IP routes. The VIF makes the cluster subnets available during connect, and the kernel will perform the routing automatically. When the session ends, the kernel is also responsible for cleaning up.
diff --git a/versioned_docs/version-2.22/reference/volume.md b/versioned_docs/version-2.22/reference/volume.md
new file mode 100644
index 00000000..5f7003f3
--- /dev/null
+++ b/versioned_docs/version-2.22/reference/volume.md
@@ -0,0 +1,42 @@
+---
+title: Volume mounts
+hide_table_of_contents: true
+---
+# Volume mounts
+
+Volume mounts are achieved using a Docker Volume plug-in and Docker volume mounts when connecting using `--docker` and using `--docker-run`. This page
+describes how mounts are achieved when running directly on the host.
+
+Telepresence supports locally mounting volumes that are mounted to your Pods. You can specify a command to run when starting the engagement, this could be a subshell or local server such as Python or Node.
+
+```
+telepresence replace --mount=/tmp/ -- /bin/bash
+```
+
+In this case, Telepresence replaces the remote container, mounts the Pod's volumes locally at `/tmp`, and starts a Bash subshell.
+ +Telepresence can set a random mount point for you by using `--mount=true` instead, you can then find the mount point in the output of `telepresence list` or using the `$TELEPRESENCE_ROOT` variable. + +``` +$ telepresence replace --mount=true -- /bin/bash +Using Deployment +replaced + Container name : + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1: + Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 + +bash-3.2$ echo $TELEPRESENCE_ROOT +/var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 +``` + +> [!NOTE] +> `--mount=true` is the default if a mount option is not specified, use `--mount=false` to disable mounting volumes. + +With either method, the code you run locally either from the subshell or from the `replace` command will need to be prepended with the `$TELEPRESENCE_ROOT` environment variable to utilize the mounted volumes. + +For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`. + +> [!NOTE] +> If using `--mount=true` without a command, you can use either [environment variable](environment.md) flag to retrieve the variable. diff --git a/versioned_docs/version-2.22/reference/vpn.md b/versioned_docs/version-2.22/reference/vpn.md new file mode 100644 index 00000000..23989e44 --- /dev/null +++ b/versioned_docs/version-2.22/reference/vpn.md @@ -0,0 +1,316 @@ +--- +title: Telepresence and VPNs +--- + +import Platform from '@site/src/components/Platform'; + +# Telepresence and VPNs + +Telepresence creates a virtual network interface (VIF) when it connects. This VIF is configured to route the cluster's +service and pod subnets so that the user can access resources in the cluster. 
It's not uncommon that the workstation +where Telepresence runs already has network interfaces that route subnets that will overlap. Such +conflicts must be resolved deterministically. + +Unless configured otherwise, Telepresence will resolve subnet conflicts by moving the cluster's subnet out of the way +using network address translation. For a majority of use-cases, this will be enough, but there are some +[caveats](#caveats-when-using-vnat) to be aware of. + +For more info, see the section on how to [avoid the conflict](#avoiding-the-conflict) below. + +## VPN Configuration + +Let's begin by reviewing what a VPN does and imagining a sample configuration that might come +to conflict with Telepresence. +Usually, a VPN client adds two kinds of routes to your machine when you connect. +The first serves to override your default route; in other words, it makes sure that packets +you send out to the public internet go through the private tunnel instead of your +ethernet or wifi adapter. We'll call this a `public VPN route`. +The second kind of route is a `private VPN route`. These are the routes that allow your +machine to access hosts inside the VPN that are not accessible to the public internet. +Generally speaking, this is a more circumscribed route that will connect your machine +only to reachable hosts on the private network, such as your Kubernetes API server. + +This diagram represents what happens when you connect to a VPN, supposing that your +private network spans the CIDR range: `10.0.0.0/8`. + +![VPN routing](../images/vpn-routing.jpg) + +## Kubernetes configuration + +One of the things a Kubernetes cluster does for you is assign IP addresses to pods and services. +This is one of the key elements of Kubernetes networking, as it allows applications on the cluster +to reach each other. When Telepresence connects you to the cluster, it will try to connect you +to the IP addresses that your cluster assigns to services and pods. 
+Cluster administrators can configure, on cluster creation, the CIDR ranges that the Kubernetes
+cluster will place resources in. Let's imagine your cluster is configured to place services in
+`10.130.0.0/16` and pods in `10.132.0.0/16`:
+
+![VPN Kubernetes config](../images/vpn-k8s-config.jpg)
+
+# Telepresence conflicts
+
+When you run `telepresence connect` to connect to a cluster, it talks to the API server
+to figure out what pod and service CIDRs it needs to map in your machine. If it detects
+that these CIDR ranges are already mapped by a VPN's `private route`, it will produce an
+error and inform you of the conflicting subnets:
+
+```console
+$ telepresence connect
+telepresence connect: error: connector.Connect: failed to connect to root daemon: rpc error: code = Unknown desc = subnet 10.43.0.0/16 overlaps with existing route "10.0.0.0/8 via 10.0.0.0 dev utun4, gw 10.0.0.1"
+```
+
+Telepresence offers three different ways to resolve this:
+
+- [Avoid the conflict](#avoiding-the-conflict) using the `--proxy-via` connect flag
+- [Allow the conflict](#allowing-the-conflict) in a controlled manner
+- [Use docker](#using-docker) to make telepresence run in a container with its own network config
+
+
+## Avoiding the conflict
+
+Telepresence can perform Virtual Network Address Translation (henceforth referred to as VNAT) of the cluster's subnets
+when routing them from the workstation, thus moving those subnets so that conflicts are avoided. Unless configured not
+to, Telepresence will use VNAT by default when it detects conflicts.
+
+VNAT is enabled by passing a `--vnat` flag (introduced in Telepresence 2.21) to `telepresence connect`. When using this
+flag, Telepresence will take the following actions:
+
+- The local DNS-server will translate any IP contained in a VNAT subnet to a virtual IP.
+- All access to a virtual IP will be translated back to its original when routed to the cluster.
+- The container environment retrieved when using `replace`, `ingest`, or `intercept` will be mangled, so that all IPs contained + in VNAT subnets are replaced with corresponding virtual IPs. + +The `--vnat` flag can be repeated to make Telepresence translate more than one subnet. + +```console +$ telepresence connect --vnat CIDR +``` +The CIDR can also be a symbolic name that identifies a well-known subnet or list of subnets: + +| Symbol | Meaning | +|-----------|-------------------------------------| +| `also` | All subnets added with --also-proxy | +| `service` | The cluster's service subnet | +| `pods` | The cluster's pod subnets. | +| `all` | All of the above. | + + +### Virtual Subnet Configuration + +Telepresence will use a special subnet when it generates the virtual IPs that are used locally. On a Linux or macOS +workstation, this subnet will be a class E subnet (not normally used for any other purposes). On Windows, the class E is +not routed, and Telepresence will instead default to `211.55.48.0/20`. + +The default subnet used can be overridden in the client configuration. + +In `config.yml` on the workstation: +```yaml +routing: + virtualSubnet: 100.10.20.0/24 +``` + +Or as a Helm chart value to be applied on all clients: +```yaml +client: + routing: + virtualSubnet: 100.10.20.0/24 +``` + +#### Example + +Let's assume that we have a conflict between the cluster's subnets, all covered by the CIDR `10.124.0.0/9` and a VPN +using `10.0.0.0/9`. We avoid the conflict using: + +```console +$ telepresence connect --vnat all +``` + +The cluster's subnets are now hidden behind a virtual subnet, and the resulting configuration will look like this: + +![VPN Telepresence](../images/vpn-vnat.jpg) + +### Proxying via a specific workload + +Telepresence is capable of routing all traffic to a VNAT to a specific workload. This is particularly useful when the +cluster's DNS is configured with domains that resolve to loop-back addresses. 
This is sometimes the case when the
+cluster uses a mesh configured to listen to a loopback address and then reroute from there.
+
+The `--proxy-via` flag (introduced in Telepresence 2.19) is similar to `--vnat`, but the argument must be in the form
+CIDR=WORKLOAD. When using this flag, all traffic to the given CIDR will be routed via the given workload.
+
+The WORKLOAD is the deployment, replicaset, statefulset, or argo-rollout in the cluster whose traffic-agent will be used
+for targeting the routed subnets.
+
+#### Example
+
+Let's assume that we have a conflict between the cluster's subnets, all covered by the CIDR `10.124.0.0/9` and a VPN
+using `10.0.0.0/9`. We avoid the conflict using:
+
+```console
+$ telepresence connect --proxy-via all=echo
+```
+
+The cluster's subnets are now hidden behind a virtual subnet, and all traffic is routed to the echo workload.
+
+### Caveats when using VNAT
+
+Telepresence may not accurately detect cluster-side IP addresses being used by services running locally on a workstation
+in certain scenarios. This limitation arises when local services obtain IP addresses from remote sources such as
+databases or configmaps, or when IP addresses are sent to them in API calls.
+
+### Disabling default VNAT
+
+The default behavior of using VNAT to resolve conflicts can be disabled by adding the following to the client config.
+
+In `config.yml` on the workstation:
+```yaml
+routing:
+  autoResolveConflicts: false
+```
+
+Or as a Helm chart value to be applied on all clients:
+```yaml
+client:
+  routing:
+    autoResolveConflicts: false
+```
+
+Explicitly allowing all conflicts will also effectively prevent the default VNAT behavior.
+
+## Allowing the conflict
+
+A conflict can be resolved by carefully considering what your network layout looks like, and then allowing Telepresence to
+override the conflicting subnets.
Telepresence is refusing to map them, because mapping them could render certain hosts +that are inside the VPN completely unreachable. However, you (or your network admin) know better than anyone how hosts +are spread out inside your VPN. + +Even if the private route routes ALL of `10.0.0.0/8`, it's possible that hosts are only being spun up in one of the +sub-blocks of the `/8` space. Let's say, for example, that you happen to know that all your hosts in the VPN are bunched +up in the first half of the space -- `10.0.0.0/9` (and that you know that any new hosts will only be assigned IP +addresses from the `/9` block). In this case you can configure Telepresence to override the other half of this CIDR +block, which is where the services and pods happen to be. + +To do this, all you have to do is configure the `client.routing.allowConflictingSubnets` flag in the Telepresence helm +chart. You can do this directly via `telepresence helm upgrade`: + +In `config.yml` on the workstation: +```yaml +routing: + allowConflictingSubnets: 10.128.0.0/9 +``` + +Or as a Helm chart configuration value to be applied on all clients: +```yaml +client: + routing: + allowConflictingSubnets: 10.128.0.0/9 +``` + +Or pass the Helm chart configuration using the `--set` flag +```console +$ telepresence helm upgrade --set client.routing.allowConflictingSubnets="{10.128.0.0/9}" +``` + +The end result of this (assuming an allowlist of `/9`) will be a configuration like this: + +![VPN Telepresence](../images/vpn-with-tele.jpg) + +### Using docker + +Use `telepresence connect --docker` to make the Telepresence daemon containerized, which means that it has its own +network configuration and therefore no conflict with a VPN. Read more about docker [here](docker-run.md). + +## Some helpful hints when dealing with conflicts + +When resolving a conflict by allowing it, you might want to validate that the routing is correct during the time when +Telepresence is connected. 
One way of doing this is to retrieve the route for an IP in a conflicting subnet. + +This example assumes that Telepresence detected a conflict with a VPN using subnet `100.124.0.0/16`, and that we then +decided to allow a conflict in a small portion of that using allowConflictingSubnets `100.124.150.0/24`. Without +telepresence being connected, we check the route for the IP `100.124.150.45`, and discover that it's running through a +Tailscale device. + + + + + +```console +$ route -n get 100.124.150.45 + route to: 100.64.2.3 +destination: 100.64.0.0 + mask: 255.192.0.0 + interface: utun4 + flags: + recvpipe sendpipe ssthresh rtt,msec rttvar hopcount mtu expire + 0 0 0 0 0 0 1280 0 +``` + +Note that in macOS it's difficult to determine what software the name of a virtual interface corresponds to -- `utun4` +doesn't indicate that it was created by Tailscale. One option is to look at the output of `ifconfig` before and after +connecting to your VPN to see if the interface in question is being added upon connection + + + + +```console +$ ip route get 100.124.150.45 +100.64.2.3 dev tailscale0 table 52 src 100.111.250.89 uid 0 +``` + + + + +```console +$ Find-NetRoute -RemoteIPAddress 100.124.150.45 + +IPAddress : 100.102.111.26 +InterfaceIndex : 29 +InterfaceAlias : Tailscale +AddressFamily : IPv4 +Type : Unicast +PrefixLength : 32 +PrefixOrigin : Manual +SuffixOrigin : Manual +AddressState : Preferred +ValidLifetime : Infinite ([TimeSpan]::MaxValue) +PreferredLifetime : Infinite ([TimeSpan]::MaxValue) +SkipAsSource : False +PolicyStore : ActiveStore + + +Caption : +Description : +ElementName : +InstanceID : ;::8;;;8 + + + +Now, run the same command with telepresence connected. The output should differ and instead show that the same IP Is +routed via the Telepresence Virtual Network. This should always be the case for an allowed conflict. 
+ +> [!NOTE] +> If you instead choose to avoid the conflict using VNAT, then the IP will be unaffected and still get routed via +> Tailscale. The cluster resource using that IP will be available to you from another subnet, using another IP. diff --git a/versioned_docs/version-2.22/release-notes.md b/versioned_docs/version-2.22/release-notes.md new file mode 100644 index 00000000..3a3e467e --- /dev/null +++ b/versioned_docs/version-2.22/release-notes.md @@ -0,0 +1,1189 @@ + +[comment]: # (Code generated by relnotesgen. DO NOT EDIT.) +# Telepresence Release Notes +## Version 2.22.0 (March 14) +##
feature
New telepresence replace command.
+
+ +The new `telepresence replace` command simplifies and clarifies container replacement. + +Previously, the `--replace` flag within the `telepresence intercept` command was used to replace containers. +However, this approach introduced inconsistencies and limitations: + +* **Confusion:** Using a flag to modify the core function of a command designed for traffic interception led + to ambiguity. +* **Inaccurate Behavior:** Replacement was not possible when no incoming traffic was intercepted, as the + command's design focused on traffic routing. + +To address these issues, the `--replace` flag within `telepresence intercept` has been deprecated. The new +`telepresence replace` command provides a dedicated and consistent method for replacing containers, enhancing +clarity and reliability. + +Key differences between `replace` and `intercept`: + +1. **Scope:** The `replace` command targets and affects an entire container, impacting all its traffic, while + an `intercept` targets specific services and/or service/container ports. +2. **Port Declarations:** Remote ports specified using the `--port` flag are container ports. +3. **No Default Port:** A `replace` can occur without intercepting any ports. +4. **Container State:** During a `replace`, the original container is no longer active within the cluster. + +The deprecated `--replace` flag still works, but is hidden from the `telepresence intercept` command help, and +will print a deprecation warning when used. +
+ +##
feature
Add json-schema for the Telepresence Helm Chart
+
+ +Helm can validate a chart using a json-schema using the command `helm lint`, and this schema can be part of the actual Helm chart. The telepresence-oss Helm chart now includes such a schema, and a new `telepresence helm lint` command was added so that linting can be performed using the embedded chart. +
+ +##
feature
No dormant container present during replace.
+
+ +Telepresence will no longer inject a dormant container during a `telepresence replace` operation. Instead, the +Traffic Agent now directly serves as the replacement container, eliminating the need to forward traffic to the +original application container. This simplification offers several advantages when using the `--replace` flag: + + - **Removal of the init-container:** The need for a separate init-container is no longer necessary. + - **Elimination of port renames:** Port renames within the intercepted pod are no longer required. +
+ +##
feature
One single invocation of the Telepresence intercept command can now intercept multiple ports.
+
+ +It is now possible to intercept multiple ports with one single invocation of `telepresence intercept` by just repeating the `--port` flag. +
+ +##
feature
[Unify how Traffic Manager selects namespaces](install/manager#static-versus-dynamic-namespace-selection)
+
+
+The definition of which namespaces a Traffic Manager manages was scattered across several Helm
+chart values, such as `manager.Rbac.namespaces`, `client.Rbac.namespaces`, and
+`agentInjector.webhook.namespaceSelector`. The definition is now unified into the mutually exclusive top-level
+Helm chart values `namespaces` and `namespaceSelector`.
+
+The `namespaces` value is just for convenience and a short form of expressing:
+```yaml
+namespaceSelector:
+  matchExpressions:
+  - key: kubernetes.io/metadata.name
+    operator: in
+    values: .
+```
+
+ +##
feature
Improved control over how remote volumes are mounted using mount policies
+
+
+Mount policies, which affect how the telepresence traffic-agent shares the pod's volumes, and also how the client will mount them, can now be provided using the Helm chart value `agent.mountPolicies` or as a JSON object in the workload annotation `telepresence.io/mount-policies`. A mount policy is applied to a volume or to all paths matching a path-prefix (distinguished by checking if the first character is a '/'), and can be one of `Ignore`, `Local`, `Remote`, or `RemoteReadOnly`.
+
+ +##
feature
List output includes workload kind.
+
+ +The output of the `telepresence list` command will now include the workload kind (deployment, replicaset, statefulset, or rollout) in all entries. +
+ +##
feature
Add ability to override the default securityContext for the Telepresence init-container
+
+ +Users can now use the Helm value `agent.initSecurityContext` to override the default securityContext for the Telepresence init-container. +
+ +##
change
Let download page use direct links to GitHub
+
+
+The download links on the release page now point directly to the assets on the GitHub release page, instead of being routed via getambassador.io/download/tel2oss/releases.
+
+ +##
change
Use telepresence.io as annotation prefix instead of telepresence.getambassador.io
+
+ +The workload and pod annotations used by Telepresence will now use the prefix `telepresence.io` instead of `telepresence.getambassador.io`. The new prefix is consistent with the prefix used by labels, and it also matches the host name of the documentation site. Annotations using the old name will still work, but warnings will be logged when they are encountered. +
+ +##
change
Make the DNS recursion check configurable and turn it off by default.
+
+
+Very few systems experience a DNS recursion lookup problem. It can only occur when the cluster runs locally and the cluster's DNS is configured to somehow use the DNS server that is started by Telepresence. The check is therefore now configurable through the client setting `dns.recursionCheck`, and it is `false` by default.
+
+ +##
change
Trigger the mutating webhook with Kubernetes eviction objects instead of patching workloads.
+
+ +Telepresence will now attempt to evict pods in order to trigger the traffic-agent's injection or removal, and revert to patching workloads if evictions are prevented by the pod's disruption budget. This causes a slight change in the traffic-manager RBAC, as the traffic-manager must be able to create "pod/eviction" objects. +
+ +##
change
The telepresence-agents configmap is no longer used.
+
+
+The traffic-agent configuration was moved into a pod-annotation. This avoids sync problems between the telepresence-agents configmap (which is no longer present) and the pods.
+
+ +##
change
Drop deprecated current-cluster-id command.
+
+ +The clusterID was deprecated some time ago, and replaced by the ID of the namespace where the traffic-manager is installed. +
+ +##
bugfix
Make telepresence connect --docker work with Rancher Desktop
+
+ +Rancher Desktop will start a K3s control-plane and typically expose the Kubernetes API server at `127.0.0.1:6443`. Telepresence can connect to this cluster when running on the host, but the address is not available when connecting in docker mode. +The problem is solved by ensuring that the Kubernetes API server address used when doing a `telepresence connect --docker` is swapped from 127.0.0.1 to the internal address of the control-plane node. This works because that address is available to other docker containers, and the Kubernetes API server is configured with a certificate that accepts it. +
+ +##
bugfix
Rename charts/telepresence to charts/telepresence-oss.
+
+ +The Helm chart name "telepresence-oss" was inconsistent with its contained folder "telepresence". As a result, attempts to install the chart using an argo ApplicationSet failed. The contained folder was renamed to match the chart name. +
+ +##
bugfix
[Conflict detection between namespaced and cluster-wide install.](install/manager#namespace-collision-detection)
+
+
+The namespace conflict detection mechanism would only discover conflicts between two _namespaced_ Traffic Managers trying to manage the same namespace. This is now fixed so that all types of conflicts are discovered.
+
+ +##
bugfix
Don't dispatch DNS discovery queries to the cluster.
+
+
+macOS based systems will often send PTR queries using names like `b._dns-sd._udp`, `lb._dns-sd._udp`, or `db._dns-sd._udp`. Those queries are no longer dispatched to the cluster.
+
+ +##
bugfix
Using the --namespace option with telepresence causes a deadlock.
+
+
+Using `telepresence list --namespace <namespace>` with a namespace different from the one that telepresence was connected to would cause a deadlock, and then produce an empty list.
+
+ +##
bugfix
Fix problem with exclude-suffix being hidden by DNS search path.
+
+ +In some situations, a name ending with an exclude-suffix like "xyz.com" would be expanded by a search path into "xyz.com.<connected namespace>" and therefore not be excluded. Instead, the name was sent to the cluster to be resolved, causing an unnecessary load on its DNS server. +
+ +## Version 2.21.3 (February 6) +##
bugfix
Using the --proxy-via flag would sometimes cause connection timeouts.
+
+
+Typically, a `telepresence connect --proxy-via CIDR=WORKLOAD` would fail with a "deadline exceeded" message when several workloads were present in the namespace, the one targeted by the proxy-via didn't yet have an agent installed, and other workloads had an agent. This was due to a race condition in the logic for the agent-based port-forwards in the root daemon. The conditions causing this race are now eliminated.
+
+ +##
bugfix
Fix panic in root daemon when using the "allow conflicting subnets" feature on macOS.
+
+ +A regression was introduced in version 2.21.0, causing a panic due to an unimplemented method in the TUN-device on macOS based clients. +
+ +##
bugfix
Ensure that annotation-enabled traffic-agents are uninstalled when uninstalling the traffic-manager.
+
+ +A traffic-agent injected because the workload had the inject annotation enabled would sometimes not get uninstalled when the traffic-manager was uninstalled. +
+ +## Version 2.21.2 (January 26) +##
bugfix
Fix panic when agentpf.client creates a Tunnel
+
+
+A race could occur where several requests were made to `agentpf.client.Tunnel` on a client that had errored when creating its port-forward to the agent. The implementation could handle one such request but not several, resulting in a panic in situations where multiple simultaneous requests were made to the same client during a very short time period.
+
+ +##
bugfix
Fix goroutine leak in dialer.
+
+ +The context passed to the `Tunnel` call that creates a stream for a dialer, was not cancelled when the dialer was finished, so the stream was never properly closed, leading to one dormant goroutine for each stream. +
+ +## Version 2.21.1 (December 17) +##
bugfix
[Allow ingest of serverless deployments without specifying an inject-container-ports annotation](https://github.com/telepresenceio/telepresence/issues/3741)
+
+ +The ability to intercept a workload without a service is built around the `telepresence.getambassador.io/inject-container-ports` annotation, and it was also required in order to ingest such a workload. This was counterintuitive and the requirement was removed. An ingest doesn't use a port. +
+ +##
bugfix
Upgrade module dependencies to get rid of critical vulnerability.
+
+ +Upgrade module dependencies to latest available stable. This includes upgrading golang.org/x/crypto, which had critical issues, from 0.30.0 to 0.31.0 where those issues are resolved. +
+ +## Version 2.21.0 (December 13) +##
feature
[Automatic VPN conflict avoidance](reference/vpn)
+
+ +Telepresence not only detects subnet conflicts between the cluster and workstation VPNs but also resolves them by performing network address translation to move conflicting subnets out of the way. +
+ +##
feature
[Virtual Address Translation (VNAT).](reference/vpn)
+
+ +It is now possible to use a virtual subnet without routing the affected IPs to a specific workload. A new `telepresence connect --vnat CIDR` flag was added that will perform virtual network address translation of cluster IPs. This flag is very similar to the `--proxy-via CIDR=WORKLOAD` introduced in 2.19, but without the need to specify a workload. +
+ +##
feature
[Intercepts targeting a specific container](reference/engagements/container)
+
+
+In certain scenarios, the container owning the intercepted port differs from the container the intercept targets. This port owner's sole purpose is to route traffic from the service to the intended container, often using a direct localhost connection.
+This update introduces a `--container <name>` option to the intercept command. While this option doesn't influence the port selection, it guarantees that the environment variables and mounts propagated to the client originate from the specified container. Additionally, if the `--replace` option is used, it ensures that this container is replaced.
+
+ +##
feature
[New telepresence ingest command](howtos/intercepts#ingest-your-service)
+
+ +The new `telepresence ingest` command, similar to `telepresence intercept`, provides local access to the volume mounts and environment variables of a targeted container. However, unlike `telepresence intercept`, `telepresence ingest` does not redirect traffic to the container and ensures that the mounted volumes are read-only. +An ingest requires a traffic-agent to be installed in the pods of the targeted workload. Beyond that, it's a client-side operation. This allows developers to have multiple simultaneous ingests on the same container. +
+ +##
feature
[New telepresence curl command](reference/docker-run#the-telepresence-curl-command)
+
+ +The new `telepresence curl` command runs curl from within a container. The command requires that a connection has been established using `telepresence connect --docker`, and the container that runs `curl` will share the same network as the containerized telepresence daemon. +
+ +##
feature
[New telepresence docker-run command](reference/docker-run#the-telepresence-docker-run-command)
+
+
+The new `telepresence docker-run` command requires that a connection has been established using `telepresence connect --docker`. It will perform a `docker run` and add the flags necessary to ensure that the started container shares the same network as the containerized telepresence daemon.
+
+ +##
feature
Mount everything read-only during intercept
+
+ +It is now possible to append ":ro" to the intercept `--mount` flag value. This ensures that all remote volumes that the intercept mounts are read-only. +
+ +##
feature
[Unify client configuration](reference/config)
+
+ +Previously, client configuration was divided between the config.yml file and a Kubernetes extension. DNS and routing settings were initially found only in the extension. However, the Helm client structure allowed entries from both. +To simplify this, we've now aligned the config.yml and Kubernetes extension with the Helm client structure. This means DNS and routing settings are now included in both. The Kubernetes extension takes precedence over the config.yml and Helm client object. +While the old-style Kubernetes extension is still supported for compatibility, it cannot be used with the new style. +
+ +##
feature
Use WebSockets for port-forward instead of the now deprecated SPDY.
+
+ +Telepresence will now use WebSockets instead of SPDY when creating port-forwards to the Kubernetes Cluster, and will fall back to SPDY when connecting to clusters that don't support SPDY. Use of the deprecated SPDY can be forced by setting `cluster.forceSPDY=true` in the `config.yml`. +See [Streaming Transitions from SPDY to WebSockets](https://kubernetes.io/blog/2024/08/20/websockets-transition/) for more information about this transition. +
+ +##
feature
Make usage data collection configurable using an extension point, and default to no-ops
+
+ +The OSS code-base will no longer report usage data to the proprietary collector at Ambassador Labs. The actual calls to the collector remain, but will be no-ops unless a proper collector client is installed using an extension point. +
+ +##
feature
[Add deployments, statefulSets, replicaSets to workloads Helm chart value](reference/engagements/sidecar#disable-workloads)
+
+
+The Helm chart value `workloads` now supports the kinds `deployments.enabled`, `statefulSets.enabled`, `replicaSets.enabled`, and `rollouts.enabled`. All except `rollouts` are enabled by default. The traffic-manager will ignore workloads, and Telepresence will not be able to intercept them, if the `enabled` of the corresponding kind is set to `false`.
+
+ +##
feature
Improved command auto-completion
+
+ +The auto-completion of namespaces, services, and containers have been added where appropriate, and the default file auto completion has been removed from most commands. +
+ +##
feature
[Docker run flags --publish, --expose, and --network now work with docker mode connections](reference/docker-run#the-telepresence-docker-run-command)
+
+ +After establishing a connection to a cluster using `telepresence connect --docker`, you can run new containers that share the same network as the containerized daemon that maintains the connection. This enables seamless communication between your local development environment and the remote services. +Normally, Docker has a limitation that prevents combining a shared network configuration with custom networks and exposing ports. However, Telepresence now elegantly circumvents this limitation so that a container started with `telepresence docker-run`, `telepresence intercept --docker-run`, or `telepresence ingest --docker-run` can use flags like `--network`, `--publish`, or `--expose`. +To achieve this, Telepresence temporarily adds the necessary network to the containerized daemon. This allows the new container to join the same network. Additionally, Telepresence starts extra socat containers to handle port mapping, ensuring that the desired ports are exposed to the local environment. +
+ +##
feature
[Prevent recursion in the Telepresence Virtual Network Interface (VIF)](howtos/cluster-in-vm)
+
+ +Network problems may arise when running Kubernetes locally (e.g., Docker Desktop, Kind, Minikube, k3s), because the VIF on the host is also accessible from the cluster's nodes. A request that isn't handled by a cluster resource might be routed back into the VIF and cause a recursion. +These recursions can now be prevented by setting the client configuration property `routing.recursionBlockDuration` so that new connection attempts are temporarily blocked for a specific IP:PORT pair immediately after an initial attempt, thereby effectively ending the recursion. +
+ +##
feature
Allow Helm chart to be included as a sub-chart
+
+ +The Helm chart previously had the unnecessary restriction that the .Release.Name under which telepresence is installed is literally called "traffic-manager". This restriction was preventing telepresence from being included as a sub-chart in a parent chart called anything but "traffic-manager". This restriction has been lifted. +
+ +##
feature
Add Windows arm64 client build
+
+ +Telepresence client is now available for Windows ARM64. Updated the release workflow files in github actions to build and publish the Windows ARM64 client. +
+ +##
change
The --agents flag to telepresence uninstall is now the default.
+
+
+The `telepresence uninstall` command was once capable of uninstalling the traffic-manager as well as traffic-agents. This behavior has been deprecated for some time now and in this release, the command is all about uninstalling the agents. Therefore the `--agents` flag was made redundant, and whatever arguments are given to the command must be names of workloads that have an agent installed, unless the `--all-agents` flag is used, in which case no arguments are allowed.
+
+ +##
change
Performance improvement for the telepresence list command
+
+ +The `telepresence list` command will now retrieve its data from the traffic-manager, which significantly improves its performance when used on namespaces that have a lot of workloads. +
+ +##
change
During an intercept, the local port defaults to the targeted port of the intercepted container instead of 8080.
+
+ +Telepresence mimics the environment of a target container during an intercept, so it's only natural that the default for the local port is determined by the targeted container port rather than just defaulting to 8080. +A default can still be explicitly defined using the `config.intercept.defaultPort` setting. +
+ +##
change
Move the telepresence-intercept-env configmap data into traffic-manager configmap.
+
+ +There's no need for two configmaps that store configuration data for the traffic manager. The traffic-manager configmap is also watched, so consolidating the configuration there saves some k8s API calls. +
+ +##
change
Tracing was removed.
+
+ +The ability to collect trace has been removed along with the `telepresence gather-traces` and `telepresence upload-traces` commands. The underlying code was complex and has not been well maintained since its inception in 2022. We have received no feedback on it and seen no indication that it has ever been used. +
+ +##
bugfix
Remove obsolete code checking the Docker Bridge for DNS
+
+ +The DNS resolver checked the Docker bridge for messages on Linux. This code was obsolete and caused problems when running in Codespaces. +
+ +##
bugfix
Fix telepresence connect confusion caused by /.dockerenv file
+
+ +A `/.dockerenv` will be present when running in a GitHub Codespaces environment. That doesn't mean that telepresence cannot use docker, or that the root daemon shouldn't start. +
+ +##
bugfix
Cap timeouts.connectivityCheck at 5 seconds.
+
+
+The timeout value of `timeouts.connectivityCheck` is used when checking if a cluster is already reachable without Telepresence setting up an additional network route. If it is, this timeout should be high enough to cover the delay when establishing a connection. If this delay is higher than a second, then chances are very low that the cluster already is reachable, and if it is, all accesses to it will be very slow. In such cases, Telepresence will create its own network interface and perform its own tunneling.
+The default timeout for the check remains at 500 milliseconds, which is more than sufficient for the majority of cases.
+
+ +##
bugfix
Prevent that traffic-manager injects a traffic-agent into itself.
+
+ +The traffic-manager can never be a subject for an intercept, ingest, or proxy-via, because that means that it injects the traffic-agent into itself, and it is not designed to do that. A user attempting this will now see a meaningful error message. +
+ +##
bugfix
Don't include pods in the kube-system namespace when computing pod-subnets from pod IPs
+
+ +A user would normally never access pods in the `kube-system` namespace directly, and automatically including pods included there when computing the subnets will often lead to problems when running the cluster locally. This namespace is therefore now excluded in situations when the pod subnets are computed from the IPs of pods. Services in this namespace will still be available through the service subnet. +If a user should require the pod-subnet to be mapped, it can be added to the `client.routing.alsoProxy` list in the helm chart. +
+ +##
bugfix
Let routes belonging to an allowed conflict be added as a static route on Linux.
+
+ +The `allowConflicting` setting didn't always work on Linux because the conflicting subnet was just added as a link to the TUN device, and therefore didn't get subjected to routing rule used to assign priority to the given subnet. +
+ +## Version 2.20.3 (November 18) +##
bugfix
[Ensure that Telepresence works with GitHub Codespaces](https://github.com/telepresenceio/telepresence/issues/3722)
+
+ +GitHub Codespaces runs in a container, but not as root. Telepresence didn't handle this situation correctly and only started the user daemon. The root daemon was never started. +
+ +##
bugfix
[Mounts not working correctly when connected with --proxy-via](https://github.com/telepresenceio/telepresence/issues/3715)
+
+ +A mount would try to connect to the sftp/ftp server using the original (cluster side) IP although that IP was translated into a virtual IP when using `--proxy-via`. +
+ +## Version 2.20.2 (October 21) +##
bugfix
Crash in traffic-manager configured with agentInjector.enabled=false
+
+ +A traffic-manager that was installed with the Helm value `agentInjector.enabled=false` crashed when a client used the commands `telepresence version` or `telepresence status`. Those commands would call a method on the traffic-manager that panicked if no traffic-agent was present. This method will now instead return the standard `Unavailable` error code, which is expected by the caller. +
+ +## Version 2.20.1 (October 10) +##
bugfix
Some workloads missing in the telepresence list output (typically replicasets owned by rollouts).
+
+ +Version 2.20.0 introduced a regression in the `telepresence list` command, resulting in the omission of all workloads that were owned by another workload. The correct behavior is to just omit those workloads that are owned by the supported workload kinds `Deployment`, `ReplicaSet`, `StatefulSet`, and `Rollout`. Furthermore, the `Rollout` kind must only be considered supported when the Argo Rollouts feature is enabled in the traffic-manager. +
+ +##
bugfix
Allow comma separated list of daemons for the gather-logs command.
+
+ +The name of the `telepresence gather-logs` flag `--daemons` suggests that the argument can contain more than one daemon, but prior to this fix, it couldn't. It is now possible to use a comma separated list, e.g. `telepresence gather-logs --daemons root,user`. +
+ +## Version 2.20.0 (October 3) +##
feature
Add timestamp to telepresence_logs.zip filename.
+
+
+The `telepresence_logs.zip` file created by `telepresence gather-logs` now has a timestamp in its filename, making it easy to find logs gathered at a certain time.
+
+ +##
feature
[Enable intercepts of workloads that have no service.](https://telepresence.io/docs/reference/engagements/cli#intercepting-without-a-service)
+
+
+Telepresence is now capable of intercepting workloads that have no associated service. The intercept will then target a container port instead of a service port. The new behavior is enabled by adding a telepresence.getambassador.io/inject-container-ports annotation where the value is a comma separated list of port identifiers consisting of either the name or the port number of a container port, optionally suffixed with `/TCP` or `/UDP`.
+
+ +##
feature
[Publish the OSS version of the telepresence Helm chart](https://artifacthub.io/packages/helm/telepresence-oss/telepresence-oss)
+
+ +The OSS version of the telepresence helm chart is now available at ghcr.io/telepresenceio/telepresence-oss, and can be installed using the command:
`helm install traffic-manager oci://ghcr.io/telepresenceio/telepresence-oss --namespace ambassador --version 2.20.0`. The chart documentation is published at ArtifactHUB.
+
+ +##
feature
[Control the syntax of the environment file created with the intercept flag --env-file](https://telepresence.io/docs/reference/environment)
+
+ +A new --env-syntax <syntax> was introduced to allow control over the syntax of the file created when using the intercept flag --env-file <file>. Valid syntaxes are "docker", "compose", "sh", "csh", "cmd", and "ps"; where "sh", "csh", and "ps" can be suffixed with ":export". +
+ +##
feature
Add support for Argo Rollout workloads.
+
+ +Telepresence now has an opt-in support for Argo Rollout workloads. The behavior is controlled by `workloads.argoRollouts.enabled` Helm chart value. It is recommended to set the following annotation telepresence.getambassador.io/inject-traffic-agent: enabled to avoid creation of unwanted revisions. +
+ +##
bugfix
Enable intercepts of containers that bind to podIP
+
+ +In previous versions, the traffic-agent would route traffic to localhost during periods when an intercept wasn't active. This made it impossible for an application to bind to the pod's IP, and it also meant that service meshes binding to the podIP would get bypassed, both during and after an intercept had been made. This is now changed, so that the traffic-agent instead forwards non intercepted requests to the pod's IP, thereby enabling the application to either bind to localhost or to that IP. +
+ +##
change
Use ghcr.io/telepresenceio instead of docker.io/datawire for OSS images and the telemount Docker volume plugin.
+
+ +All OSS telepresence images and the telemount Docker plugin are now published at the public registry ghcr.io/telepresenceio and all references from the client and traffic-manager has been updated to use this registry instead of the one at docker.io/datawire. +
+ +##
change
Use nftables instead of iptables-legacy
+
+
+Some time ago, we introduced iptables-legacy because users had problems using Telepresence with Fly.io where nftables wasn't supported by the kernel. Fly.io has since fixed this, so Telepresence will now use nftables again. This, in turn, ensures that modern systems that lack support for iptables-legacy will work.
+
+ +##
bugfix
Root daemon wouldn't start when sudo timeout was zero.
+
+
+The root daemon refused to start when sudo was configured with a timestamp_timeout=0. This was due to logic that first requested root privileges using a sudo call, and then relied on these privileges being cached, so that a subsequent call using --non-interactive was guaranteed to succeed. This logic will now instead do one single sudo call, and rely solely on sudo to print an informative prompt and start the daemon in the background.
+
+ +##
bugfix
Detect minikube network when connecting with --docker
+
+ +A telepresence connect --docker failed when attempting to connect to a minikube that uses a docker driver because the containerized daemon did not have access to the minikube docker network. Telepresence will now detect an attempt to connect to that network and attach it to the daemon container as needed. +
+ +## Version 2.19.1 (July 12) +##
feature
[Add brew support for the OSS version of Telepresence.](https://github.com/telepresenceio/telepresence/issues/3609)
+
+ +The Open-Source Software version of Telepresence can now be installed using the brew formula via brew install telepresenceio/telepresence/telepresence-oss. +
+ +##
feature
Add --create-namespace flag to the telepresence helm install command.
+
+ +A --create-namespace (default true) flag was added to the telepresence helm install command. No attempt will be made to create a namespace for the traffic-manager if it is explicitly set to false. The command will then fail if the namespace is missing. +
+ +##
feature
Introduce DNS fallback on Windows.
+
+ +A network.defaultDNSWithFallback config option has been introduced on Windows. It will cause the DNS-resolver to fall back to the resolver that was first in the list prior to when Telepresence establishes a connection. The option is default true since it is believed to give the best experience but can be set to false to restore the old behavior. +
+ +##
feature
[Brew now supports MacOS (amd64/arm64) / Linux (amd64)](https://github.com/datawire/homebrew-blackbird/issues/19)
+
+ +The brew formula can now dynamically support MacOS (amd64/arm64) / Linux (amd64) in a single formula +
+ +##
feature
Add ability to provide an externally-provisioned webhook secret
+
+ +Added supplied as a new option for agentInjector.certificate.method. This fully disables the generation of the Mutating Webhook's secret, allowing the chart to use the values of a pre-existing secret named agentInjector.secret.name. Previously, the install would fail when it attempted to create or update the externally-managed secret. +
+ +##
feature
Let PTR query for DNS server return the cluster domain.
+
+ +The nslookup program on Windows uses a PTR query to retrieve its displayed "Server" property. This Telepresence DNS resolver will now return the cluster domain on such a query. +
+ +##
feature
Add scheduler name to PODs templates.
+
+ +A new Helm chart value schedulerName has been added. With this feature, we are able to define some particular schedulers from Kubernetes to apply some different strategies to allocate telepresence resources, including the Traffic Manager and hooks pods. +
+ +##
bugfix
Race in traffic-agent injector when using inject annotation
+
+ +Applying multiple deployments that used the telepresence.getambassador.io/inject-traffic-agent: enabled annotation would cause a race condition, resulting in a large number of created pods that eventually had to be deleted, or sometimes in pods that didn't contain a traffic agent. +
+ +##
bugfix
Fix configuring custom agent security context
+
+ +The traffic-manager helm chart will now correctly use a custom agent security context if one is provided. +
+ +## Version 2.19.0 (June 15) +##
feature
Warn when an Open Source Client connects to an Enterprise Traffic Manager.
+
+ +The difference between the OSS and the Enterprise offering is not well understood, and OSS users often install a traffic-manager using the Helm chart published at getambassador.io. This Helm chart installs an enterprise traffic-manager, which is probably not what the user would expect. Telepresence will now warn when an OSS client connects to an enterprise traffic-manager and suggest switching to an enterprise client, or use telepresence helm install to install an OSS traffic-manager. +
+ +##
feature
Add scheduler name to PODs templates.
+
+ +A new Helm chart value schedulerName has been added. With this feature, we are able to define some particular schedulers from Kubernetes to apply some different strategies to allocate telepresence resources, including the Traffic Manager and hooks pods. +
+ +##
bugfix
Improve traffic-manager performance in very large clusters.
+
+ +The traffic-manager will now use a shared-informer when keeping track of deployments. This will significantly reduce the load on the Kubelet in large clusters and therefore lessen the risk for the traffic-manager being throttled, which can lead to other problems. +
+ +##
bugfix
Kubeconfig exec authentication failure when connecting with --docker from a WSL linux host
+
+ +Clusters like Amazon EKS often use a special authentication binary that is declared in the kubeconfig using an exec authentication strategy. This binary is normally not available inside a container. Consequently, a modified kubeconfig is used when telepresence connect --docker executes, appointing a kubeauth binary which instead retrieves the authentication from a port on the Docker host that communicates with another process outside of Docker. This process then executes the original exec command to retrieve the necessary credentials. +This setup was problematic when using WSL, because even though telepresence connect --docker was executed on a Linux host, the Docker host available from host.docker.internal that the kubeauth connected to was the Windows host running Docker Desktop. The fix for this was to use the local IP of the default route instead of host.docker.internal when running under WSL. +
+ +##
bugfix
Fix bug in workload cache, causing endless recursion when a workload uses the same name as its owner.
+
+ +The workload cache was keyed by name and namespace, but not by kind, so a workload named the same as its owner workload would be found using the same key. This led to the workload finding itself when looking up its owner, which in turn resulted in an endless recursion when searching for the topmost owner. +
+ +##
bugfix
FailedScheduling events mentioning node availability considered fatal when waiting for agent to arrive.
+
+ +The traffic-manager considers some events as fatal when waiting for a traffic-agent to arrive after an injection has been initiated. This logic would trigger on events like "Warning FailedScheduling 0/63 nodes are available" although those events indicate a recoverable condition and kill the wait. This is now fixed so that the events are logged but the wait continues. +
+ +##
bugfix
Improve how the traffic-manager resolves DNS when no agent is installed.
+
+ +The traffic-manager is typically installed into a namespace different from the one that clients are connected to. It's therefore important that the traffic-manager adds the client's namespace when resolving single label names in situations where there aren't any agents to dispatch the DNS query to. +
+ +##
change
Removal of ability import legacy artifact into Helm.
+
+ +A helm install would make attempts to find manually installed artifacts and make them managed by Helm by adding the necessary labels and annotations. This was important when the Helm chart was first introduced but is far less so today, and this legacy import was therefore removed. +
+ +##
bugfix
[Docker aliases deprecation caused failure to detect Kind cluster.](https://docs.docker.com/engine/deprecated/#container-short-id-in-network-aliases-field)
+
+ +The logic for detecting if a cluster is a local Kind cluster, and therefore needs some special attention when using telepresence connect --docker, relied on the presence of Aliases in the Docker network that a Kind cluster sets up. In Docker versions from 26 and up, this value is no longer used, but the corresponding info can instead be found in the new DNSNames field. +
+ +##
bugfix
[Include svc as a top-level domain in the DNS resolver.](https://github.com/telepresenceio/telepresence/issues/2814)
+
+ +It's not uncommon that use-cases involving Kafka or other middleware use FQNs that end with "svc". The core-DNS resolver in Kubernetes can resolve such names. With this bugfix, the Telepresence DNS resolver will also be able to resolve them, and thereby remove the need to add ".svc" to the include-suffix list. +
+ +##
feature
Add ability to enable/disable the mutating webhook.
+
+ +A new Helm chart boolean value agentInjector.enable has been added that controls the agent-injector service and its associated mutating webhook. If set to false, the service, the webhook, and the secrets and certificates associated with it, will no longer be installed. +
+ +##
feature
Add ability to mount a webhook secret.
+
+ +A new Helm chart value agentInjector.certificate.accessMethod which can be set to watch (the default) or mount has been added. The mount setting is intended for clusters with policies that prevent containers from doing a get, list or watch of a Secret, but where a latency of up to 90 seconds is acceptable between the time the secret is regenerated and the agent-injector picks it up. +
+ +##
feature
Make it possible to specify ignored volume mounts using path prefix.
+
+ +Volume mounts like /var/run/secrets/kubernetes.io are not declared in the workload. Instead, they are injected during pod-creation and their names are generated. It is now possible to ignore such mounts using a matching path prefix. +
+ +##
feature
Make the telemount Docker Volume plugin configurable
+
+ +A telemount object was added to the intercept object in config.yml (or Helm value client.intercept), so that the automatic download and installation of this plugin can be fully customised. +
+ +##
feature
Add option to load the kubeconfig yaml from stdin during connect.
+
+ +This allows another process with a kubeconfig already loaded in memory to directly pass it to telepresence connect without needing a separate file. Simply use a dash "-" as the filename for the --kubeconfig flag. +
+ +##
feature
Add ability to specify agent security context.
+
+ +A new Helm chart value agent.securityContext that will allow configuring the security context of the injected traffic agent. The value can be set to a valid Kubernetes securityContext object, or can be set to an empty value ({}) to ensure the agent has no defined security context. If no value is specified, the traffic manager will set the agent's security context to the same as the first container's of the workload being injected into. +
+ +##
change
Tracing is no longer enabled by default.
+
+ +Tracing must now be enabled explicitly in order to use the telepresence gather-traces command. +
+ +##
change
Removal of timeouts that are no longer in use
+
+ +The config.yml values timeouts.agentInstall and timeouts.apply haven't been in use since versions prior to 2.6.0, when the client was responsible for installing the traffic-agent. These timeouts are now removed from the code-base, and a warning will be printed when attempts are made to use them. +
+ +##
bugfix
Search all private subnets to find one open for dnsServerSubnet
+
+ +This resolves a bug that did not test all subnets in a private range, sometimes resulting in the warning, "DNS doesn't seem to work properly." +
+ +##
bugfix
Docker aliases deprecation caused failure to detect Kind cluster.
+
+ +The logic for detecting if a cluster is a local Kind cluster, and therefore needs some special attention when using telepresence connect --docker, relied on the presence of Aliases in the Docker network that a Kind cluster sets up. In Docker versions from 26 and up, this value is no longer used, but the corresponding info can instead be found in the new DNSNames field. +
+ +##
bugfix
Creation of individual pods was blocked by the agent-injector webhook.
+
+ +An attempt to create a pod was blocked unless it was provided by a workload. Hence, commands like kubectl run -i busybox --rm --image=curlimages/curl --restart=Never -- curl echo-easy.default would be blocked from executing. +
+ +##
bugfix
Fix panic due to root daemon not running.
+
+ +If a telepresence connect was made at a time when the root daemon was not running (an abnormal condition) and a subsequent intercept was then made, a panic would occur when the port-forward to the agent was set up. This is now fixed so that the initial telepresence connect is refused unless the root daemon is running. +
+ +##
bugfix
Get rid of telemount plugin stickiness
+
+ +The datawire/telemount that is automatically downloaded and installed, would never be updated once the installation was made. Telepresence will now check for the latest release of the plugin and cache the result of that check for 24 hours. If a new version arrives, it will be installed and used. +
+ +##
bugfix
Use route instead of address for CIDRs with masks that don't allow "via"
+
+ +A CIDR with a mask that leaves less than two bits (/31 or /32 for IPv4) cannot be added as an address to the VIF, because such addresses must have bits allowing a "via" IP. +The logic was modified to allow such CIDRs to become static routes, using the VIF base address as their "via", rather than being VIF addresses in their own right. +
+ +##
bugfix
Containerized daemon created cache files owned by root
+
+ +When using telepresence connect --docker to create a containerized daemon, that daemon would sometimes create files in the cache that were owned by root, which then caused problems when connecting without the --docker flag. +
+ +##
bugfix
Remove large number of requests when traffic-manager is used in large clusters.
+
+ +The traffic-manager would make a very large number of API requests during cluster start-up or when many services were changed for other reasons. The logic that did this was refactored and the number of queries were significantly reduced. +
+ +##
bugfix
Don't patch probes on replaced containers.
+
+ +A container that is being replaced by a telepresence intercept --replace invocation will have no liveness-, readiness, nor startup-probes. Telepresence didn't take this into consideration when injecting the traffic-agent, but now it will refrain from patching symbolic port names of those probes. +
+ +##
bugfix
Don't rely on context name when deciding if a kind cluster is used.
+
+ +The code that auto-patches the kubeconfig when connecting to a kind cluster from within a docker container, relied on the context name starting with "kind-", but although all contexts created by kind have that name, the user is still free to rename it or to create other contexts using the same connection properties. The logic was therefore changed to instead look for a loopback service address. +
+ +## Version 2.18.0 (February 9) +##
feature
Include the image for the traffic-agent in the output of the version and status commands.
+
+ +The version and status commands will now output the image that the traffic-agent will be using when injected by the agent-injector. +
+ +##
feature
Custom DNS using the client DNS resolver.
+
+ +

A new telepresence connect --proxy-via CIDR=WORKLOAD flag was introduced, allowing Telepresence to translate DNS responses matching specific subnets into virtual IPs that are used locally. Those virtual IPs are then routed (with reverse translation) via the pod's of a given workload. This makes it possible to handle custom DNS servers that resolve domains into loopback IPs. The flag may also be used in cases where the cluster's subnets are in conflict with the workstation's VPN.

The CIDR can also be a symbolic name that identifies a subnet or list of subnets:

- `also` — All subnets added with `--also-proxy`
- `service` — The cluster's service subnet
- `pods` — The cluster's pod subnets
- `all` — All of the above
+
+ +##
bugfix
Ensure that agent.appProtocolStrategy is propagated correctly.
+
+ +The agent.appProtocolStrategy was inadvertently dropped when moving license related code from the OSS repository to the repository for the Enterprise version of Telepresence. It has now been restored. +
+ +##
bugfix
Include non-default zero values in output of telepresence config view.
+
+ +The telepresence config view command will now print zero values in the output when the default for the value is non-zero. +
+ +##
bugfix
Restore ability to run the telepresence CLI in a docker container.
+
+ +The improvements made to be able to run the telepresence daemon in docker using telepresence connect --docker made it impossible to run both the CLI and the daemon in docker. This commit fixes that and also ensures that the user- and root-daemons are merged in this scenario when the container runs as root. +
+ +##
bugfix
Remote mounts when intercepting with the --replace flag.
+
+ +A telepresence intercept --replace did not correctly mount all volumes, because when the intercepted container was removed, its mounts were no longer visible to the agent-injector when it was subjected to a second invocation. The container is now kept in place, but with an image that just sleeps infinitely. +
+ +##
bugfix
Intercepting with the --replace flag will no longer require all subsequent intercepts to use --replace.
+
+ +A telepresence intercept --replace will no longer switch the mode of the intercepted workload, forcing all subsequent intercepts on that workload to use --replace until the agent is uninstalled. Instead, --replace can be used interchangeably just like any other intercept flag. +
+ +##
bugfix
Kubeconfig exec authentication with context names containing colon didn't work on Windows
+
+ +The logic added to allow the root daemon to connect directly to the cluster using the user daemon as a proxy for exec type authentication in the kube-config, didn't take into account that a context name sometimes contains the colon ":" character. That character cannot be used in filenames on windows because it is the drive letter separator. +
+ +##
bugfix
Provide agent name and tag as separate values in Helm chart
+
+ +The AGENT_IMAGE was a concatenation of the agent's name and tag. This is now changed so that the env instead contains an AGENT_IMAGE_NAME and AGENT_IMAGE_TAG. The AGENT_IMAGE is removed. Also, a new env REGISTRY is added, where the registry of the traffic-manager image is provided. The AGENT_REGISTRY is no longer required and will default to REGISTRY if not set. +
+ +##
bugfix
Environment interpolation expressions were prefixed twice.
+
+ +Telepresence would sometimes prefix environment interpolation expressions in the traffic-agent twice so that an expression that looked like $(SOME_NAME) in the app-container, ended up as $(_TEL_APP_A__TEL_APP_A_SOME_NAME) in the corresponding expression in the traffic-agent. +
+ +##
bugfix
Panic in root-daemon on darwin workstations with full access to cluster network.
+
+ +A darwin machine with full access to the cluster's subnets will never create a TUN-device, and a check was missing if the device actually existed, which caused a panic in the root daemon. +
+ +##
bugfix
Show allow-conflicting-subnets in telepresence status and telepresence config view.
+
+ +The telepresence status and telepresence config view commands didn't show the allowConflictingSubnets CIDRs because the value wasn't propagated correctly to the CLI. +
+ +##
feature
It is now possible use a host-based connection and containerized connections simultaneously.
+
+ +Only one host-based connection can exist because that connection will alter the DNS to reflect the namespace of the connection, but it's now possible to create additional connections using --docker while retaining the host-based connection. +
+ +##
feature
Ability to set the hostname of a containerized daemon.
+
+ +The hostname of a containerized daemon defaults to be the container's ID in Docker. You now can override the hostname using telepresence connect --docker --hostname <a name>. +
+ +##
feature
New --multi-daemon flag to enforce a consistent structure for the status command output.
+
+ +The output of the telepresence status when using --output json or --output yaml will either show an object where the user_daemon and root_daemon are top level elements, or when multiple connections are used, an object where a connections list contains objects with those daemons. The flag --multi-daemon will enforce the latter structure even when only one daemon is connected so that the output can be parsed consistently. The reason for keeping the former structure is to retain backward compatibility with existing parsers. +
+ +##
bugfix
Make output from telepresence quit more consistent.
+
+ +A quit (without -s) just disconnects the host user and root daemons but will quit a container based daemon. The message printed was simplified to remove some have/has is/are errors caused by the difference. +
+ +##
bugfix
Fix "tls: bad certificate" errors when refreshing the mutator-webhook secret
+
+ +The agent-injector service will now refresh the secret used by the mutator-webhook each time a new connection is established, thus preventing the certificates to go out-of-sync when the secret is regenerated. +
+ +##
bugfix
Keep telepresence-agents configmap in sync with pod states.
+
+ +An intercept attempt that resulted in a timeout due to failure of injecting the traffic-agent left the telepresence-agents configmap in a state that indicated that an agent had been added, which caused problems for subsequent intercepts after the problem causing the first failure had been fixed. +
+ +##
bugfix
The telepresence status command will now report the status of all running daemons.
+
+ +A telepresence status, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead reports the status of all running daemons. +
+ +##
bugfix
The telepresence version command will now report the version of all running daemons.
+
+ +A telepresence version, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead reports the version of all running daemons. +
+ +##
bugfix
Multiple containerized daemons can now be disconnected using telepresence quit -s
+
+ +A telepresence quit -s, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead quits all daemons. +
+ +##
bugfix
The DNS search path on Windows is now restored when Telepresence quits
+
+ +The DNS search path that Telepresence uses to simulate the DNS lookup functionality in the connected cluster namespace was not removed by a telepresence quit, resulting in connectivity problems from the workstation. Telepresence will now remove the entries that it has added to the search list when it quits. +
+ +##
bugfix
The user-daemon would sometimes get killed when used by multiple simultaneous CLI clients.
+
+ +The user-daemon would die with a fatal "fatal error: concurrent map writes" error in the connector.log, effectively killing the ongoing connection. +
+ +##
bugfix
Multiple services ports using the same target port would not get intercepted correctly.
+
+ +Intercepts didn't work when multiple service ports were using the same container port. Telepresence would think that one of the ports wasn't intercepted and therefore disable the intercept of the container port. +
+ +##
bugfix
Root daemon refuses to disconnect.
+
+ +The root daemon would sometimes hang forever when attempting to disconnect due to a deadlock in the VIF-device. +
+ +##
bugfix
Fix panic in user daemon when traffic-manager was unreachable
+
+ +The user daemon would panic if the traffic-manager was unreachable. It will now instead report a proper error to the client. +
+ +##
change
Removal of backward support for versions predating 2.6.0
+
+ +The telepresence helm installer will no longer discover and convert workloads that were modified by versions prior to 2.6.0. The traffic manager will no longer support the muxed tunnels used in versions prior to 2.5.0. +
+ +## Version 2.17.0 (November 14) +##
feature
Additional Prometheus metrics to track intercept/connect activity
+
+ +This feature adds the following metrics to the Prometheus endpoint: connect_count, connect_active_status, intercept_count, and intercept_active_status. These are labeled by client/install_id. Additionally, the intercept_count metric has been renamed to active_intercept_count for clarity. +
+ +##
feature
Make the Telepresence client docker image configurable.
+
+ +The docker image used when running a Telepresence intercept in docker mode can now be configured using the setting images.clientImage and will default first to the value of the environment TELEPRESENCE_CLIENT_IMAGE, and then to the value preset by the telepresence binary. This configuration setting is primarily intended for testing purposes. +
+ +##
feature
Use traffic-agent port-forwards for outbound and intercepted traffic.
+
+ +The telepresence TUN-device is now capable of establishing direct port-forwards to a traffic-agent in the connected namespace. That port-forward is then used for all outbound traffic to the device, and also for all traffic that arrives from intercepted workloads. Getting rid of the extra hop via the traffic-manager improves performance and reduces the load on the traffic-manager. The feature can only be used if the client has Kubernetes port-forward permissions to the connected namespace. It can be disabled by setting cluster.agentPortForward to false in config.yml. +
+ +##
feature
Improve outbound traffic performance.
+
+ +The root-daemon now communicates directly with the traffic-manager instead of routing all outbound traffic through the user-daemon. The root-daemon uses a patched kubeconfig where exec configurations to obtain credentials are dispatched to the user-daemon. This to ensure that all authentication plugins will execute in user-space. The old behavior of routing everything through the user-daemon can be restored by setting cluster.connectFromRootDaemon to false in config.yml. +
+ +##
feature
New networking CLI flag --allow-conflicting-subnets
+
+ +telepresence connect (and other commands that kick off a connect) now accepts an --allow-conflicting-subnets CLI flag. This is equivalent to client.routing.allowConflictingSubnets in the helm chart, but can be specified at connect time. It will be appended to any configuration pushed from the traffic manager. +
+ +##
change
Warn if large version mismatch between traffic manager and client.
+
+ +Print a warning if the minor version diff between the client and the traffic manager is greater than three. +
+ +##
change
The authenticator binary was removed from the docker image.
+
+ +The authenticator binary, used when serving proxied exec kubeconfig credential retrieval, has been removed. The functionality was instead added as a subcommand to the telepresence binary. +
+ +## Version 2.16.1 (October 12) +##
feature
Add --docker-debug flag to the telepresence intercept command.
+
+ +This flag is similar to --docker-build but will start the container with more relaxed security using the docker run flags --security-opt apparmor=unconfined --cap-add SYS_PTRACE. +
+ +##
feature
Add a --expose option to the telepresence connect command.
+
+ +In some situations it is necessary to make some ports available to the host from a containerized telepresence daemon. This commit adds a repeatable --expose <docker port exposure> flag to the connect command. +
+ +##
feature
Prevent agent-injector webhook from selecting from kube-xxx namespaces.
+
+ +The kube-system and kube-node-lease namespaces should not be affected by a global agent-injector webhook by default. A default namespaceSelector was therefore added to the Helm Chart agentInjector.webhook that contains a NotIn preventing those namespaces from being selected. +
+ +##
bugfix
Backward compatibility for pod template TLS annotations.
+
+ +Users of Telepresence < 2.9.0 that make use of the pod template TLS annotations were unable to upgrade because the annotation names have changed (now prefixed by "telepresence."), and the environment expansion of the annotation values was dropped. This fix restores support for the old names (while retaining the new ones) and the environment expansion. +
+ +##
security
Built with go 1.21.3
+
+ +Built Telepresence with go 1.21.3 to address CVEs. +
+ +##
bugfix
Match service selector against pod template labels
+
+ +When listing intercepts (typically by calling telepresence list) selectors of services are matched against workloads. Previously the match was made against the labels of the workload, but now they are matched against the labels of the workload's pod template. Since the service would actually be matched against pods this is more correct. The most common case when this makes a difference is that statefulsets now are listed when they should. +
+ +## Version 2.16.0 (October 2) +##
bugfix
The helm sub-commands will no longer start the user daemon.
+
+ +The telepresence helm install/upgrade/uninstall commands will no longer start the telepresence user daemon because there's no need to connect to the traffic-manager in order for them to execute. +
+ +##
bugfix
Routing table race condition
+
+ +A race condition would sometimes occur when a Telepresence TUN device was deleted and another created in rapid succession that caused the routing table to reference interfaces that no longer existed. +
+ +##
bugfix
Stop lingering daemon container
+
+ +When using telepresence connect --docker, a lingering container could be present, causing errors like "The container name NN is already in use by container XX ...". When this happens, the connect logic will now give the container some time to stop and then call docker stop NN to stop it before retrying to start it. +
+ +##
bugfix
Add file locking to the Telepresence cache
+
+ +Files in the Telepresence cache are accessed by multiple processes. The processes will now use advisory locks on the files to guarantee consistency. +
+ +##
change
Lock connection to namespace
+
+ +The behavior changed so that a connected Telepresence client is bound to a namespace. The namespace can then not be changed unless the client disconnects and reconnects. A connection is also given a name. The default name is composed from <kube context name>-<namespace> but can be given explicitly when connecting using --name. The connection can optionally be identified using the option --use <name match> (only needed when docker is used and more than one connection is active). +
+ +##
change
Deprecation of global --context and --docker flags.
+
+ +The global flags --context and --docker will now be considered deprecated unless used with commands that accept the full set of Kubernetes flags (e.g. telepresence connect). +
+ +##
change
Deprecation of the --namespace flag for the intercept command.
+
+ +The --namespace flag is now deprecated for telepresence intercept command. The flag can instead be used with all commands that accept the full set of Kubernetes flags (e.g. telepresence connect). +
+ +##
change
Legacy code predating version 2.6.0 was removed.
+
+ +The telepresence code-base still contained a lot of code that would modify workloads instead of relying on the mutating webhook installer when a traffic-manager version predating version 2.6.0 was discovered. This code has now been removed. +
+ +##
feature
Add `telepresence list-namespaces` and `telepresence list-contexts` commands
+
+ +These commands can be used to check accessible namespaces and for automation. +
+ +##
change
Implicit connect warning
+
+ +A deprecation warning will be printed if a command other than telepresence connect causes an implicit connect to happen. Implicit connects will be removed in a future release. +
+ +## Version 2.15.1 (September 6) +##
security
Rebuild with go 1.21.1
+
+ +Rebuild Telepresence with go 1.21.1 to address CVEs. +
+ +##
security
Set security context for traffic agent
+
+ +Openshift users reported that the traffic agent injection was failing due to a missing security context. +
+ +## Version 2.15.0 (August 29) +##
security
Add ASLR to telepresence binaries
+
+ +ASLR hardens binary security against fixed memory attacks. +
+ +##
feature
[Added client builds for arm64 architecture.](https://github.com/telepresenceio/telepresence/issues/3259)
+
+ +Updated the release workflow files in github actions to include building and publishing the client binaries for arm64 architecture. +
+ +##
bugfix
[KUBECONFIG env var can now be used with the docker mode.](https://github.com/telepresenceio/telepresence/pull/3300)
+
+ +If provided, the KUBECONFIG environment variable was passed to the kubeauth-foreground service as a parameter. However, since it didn't exist, the CLI was throwing an error when using telepresence connect --docker. +
+ +##
bugfix
[Fix deadlock while watching workloads](https://github.com/telepresenceio/telepresence/pull/3298)
+
+ +The telepresence list --output json-stream wasn't releasing the session's lock after being stopped, including with a telepresence quit. The user could be blocked as a result. +
+ +##
bugfix
Change json output of telepresence list command
+
+ +Replace deprecated info in the JSON output of the telepresence list command. +
+ +## Version 2.14.4 (August 21) +##
bugfix
[Nil pointer exception when upgrading the traffic-manager.](https://github.com/telepresenceio/telepresence/issues/3313)
+
+ +Upgrading the traffic-manager using telepresence helm upgrade would sometimes result in a helm error message executing "telepresence/templates/intercept-env-configmap.yaml" at <.Values.intercept.environment.excluded>: nil pointer evaluating interface {}.excluded" +
+ +## Version 2.14.2 (July 26) +##
bugfix
[Telepresence now use the OSS agent in its latest version by default.](https://github.com/telepresenceio/telepresence/issues/3271)
+
+ +The traffic manager admin was forced to set it manually during the chart installation. +
+ +## Version 2.14.1 (July 7) +##
feature
Envoy's http idle timeout is now configurable.
+
+ +A new agent.helm.httpIdleTimeout setting was added to the Helm chart that controls the proprietary Traffic agent's http idle timeout. The default of one hour, which in some situations would cause a lot of resource consuming and lingering connections, was changed to 70 seconds. +
+ +##
feature
Add more gauges to the Traffic manager's Prometheus client.
+
+ +Several gauges were added to the Prometheus client to make it easier to monitor what the Traffic manager spends resources on. +
+ +##
feature
Agent Pull Policy
+
+ +Add option to set traffic agent pull policy in helm chart. +
+ +##
bugfix
Resource leak in the Traffic manager.
+
+ +Fixes a resource leak in the Traffic manager caused by lingering tunnels between the clients and Traffic agents. The tunnels are now closed correctly when terminated from the side that created them. +
+ +##
bugfix
[Fixed problem setting traffic manager namespace using the kubeconfig extension.](https://www.telepresence.io/docs/reference/config#manager)
+
+ +Fixes a regression introduced in version 2.10.5, making it impossible to set the traffic-manager namespace using the telepresence.io kubeconfig extension. +
+ +## Version 2.14.0 (June 12) +##
feature
[DNS configuration now supports excludes and mappings.](https://github.com/telepresenceio/telepresence/pull/3172)
+
+ +The DNS configuration now supports two new fields, excludes and mappings. The excludes field allows you to exclude a given list of hostnames from resolution, while the mappings field can be used to resolve a hostname with another. +
+ +##
feature
Added the ability to exclude environment variables
+
+ +Added a new config map that can take an array of environment variables that will then be excluded from an intercept that retrieves the environment of a pod. +
+ +##
bugfix
Fixed traffic-agent backward incompatibility issue causing lack of remote mounts
+
+ +A traffic-agent of version 2.13.3 (or 1.13.15) would not propagate the directories under /var/run/secrets when used with a traffic manager older than 2.13.3. +
+ +##
bugfix
[Fixed race condition causing segfaults on rare occasions when a tunnel stream timed out.](https://github.com/telepresenceio/telepresence/pull/2963)
+
+ +A context cancellation could sometimes be trapped in a stream reader, causing it to incorrectly return an undefined message which in turn caused the parent reader to panic on a nil pointer reference. +
+ +##
change
Routing conflict reporting.
+
+ +Telepresence will now attempt to detect and report routing conflicts with other running VPN software on client machines. There is a new configuration flag that can be tweaked to allow certain CIDRs to be overridden by Telepresence. +
+ +##
change
test-vpn command deprecated
+
+ +Running telepresence test-vpn will now print a deprecation warning and exit. The command will be removed in a future release. Instead, please configure telepresence for your VPN's routes. +
+ +## Version 2.13.3 (May 25) +##
feature
[Add imagePullSecrets to hooks](https://github.com/telepresenceio/telepresence/pull/3079)
+
+ +Add .Values.hooks.curl.imagePullSecrets and .Values.hooks.busybox.imagePullSecrets to Helm values. +
+ +##
change
Change reinvocation policy to Never for the mutating webhook
+
+ +The default setting of the reinvocationPolicy for the mutating webhook dealing with agent injections changed from IfNeeded to Never. +
+ +##
bugfix
[Fix mounting fail of IAM roles for service accounts web identity token](https://github.com/telepresenceio/telepresence/issues/3166)
+
+ +The eks.amazonaws.com/serviceaccount volume injected by EKS is now exported and remotely mounted during an intercept. +
+ +##
bugfix
[Correct namespace selector for cluster versions with non-numeric characters](https://github.com/telepresenceio/telepresence/pull/3184)
+
+ +The mutating webhook now correctly applies the namespace selector even if the cluster version contains non-numeric characters. For example, it can now handle versions such as Major:"1", Minor:"22+". +
+ +##
bugfix
[Enable IPv6 on the telepresence docker network](https://github.com/telepresenceio/telepresence/issues/3179)
+
+ +The "telepresence" Docker network will now propagate DNS AAAA queries to the Telepresence DNS resolver when it runs in a Docker container. +
+ +##
bugfix
[Fix the crash when intercepting with --local-only and --docker-run](https://github.com/telepresenceio/telepresence/issues/3171)
+
+ +Running telepresence intercept --local-only --docker-run no longer results in a panic. +
+ +##
bugfix
[Fix incorrect error message with local-only mounts](https://github.com/telepresenceio/telepresence/issues/3171)
+
+ +Running telepresence intercept --local-only --mount false no longer results in an incorrect error message saying "a local-only intercept cannot have mounts". +
+ +##
bugfix
[Specify port in hook URLs](https://github.com/telepresenceio/telepresence/pull/3161)
+
+ +The helm chart now correctly handles custom agentInjector.webhook.port that was not being set in hook URLs. +
+ +##
bugfix
Fix wrong default value for disableGlobal and agentArrival
+
+ +Params .intercept.disableGlobal and .timeouts.agentArrival are now correctly honored. +
+ diff --git a/versioned_docs/version-2.22/release-notes.mdx b/versioned_docs/version-2.22/release-notes.mdx new file mode 100644 index 00000000..8b9adbb2 --- /dev/null +++ b/versioned_docs/version-2.22/release-notes.mdx @@ -0,0 +1,1195 @@ +--- +title: Release Notes +--- + +import { Note, Title, Body } from '@site/src/components/ReleaseNotes' + +[comment]: # (Code generated by relnotesgen. DO NOT EDIT.) + +# Telepresence Release Notes +## Version 2.22.0 (March 14) + + New telepresence replace command. + +The new `telepresence replace` command simplifies and clarifies container replacement. + +Previously, the `--replace` flag within the `telepresence intercept` command was used to replace containers. +However, this approach introduced inconsistencies and limitations: + +* **Confusion:** Using a flag to modify the core function of a command designed for traffic interception led + to ambiguity. +* **Inaccurate Behavior:** Replacement was not possible when no incoming traffic was intercepted, as the + command's design focused on traffic routing. + +To address these issues, the `--replace` flag within `telepresence intercept` has been deprecated. The new +`telepresence replace` command provides a dedicated and consistent method for replacing containers, enhancing +clarity and reliability. + +Key differences between `replace` and `intercept`: + +1. **Scope:** The `replace` command targets and affects an entire container, impacting all its traffic, while + an `intercept` targets specific services and/or service/container ports. +2. **Port Declarations:** Remote ports specified using the `--port` flag are container ports. +3. **No Default Port:** A `replace` can occur without intercepting any ports. +4. **Container State:** During a `replace`, the original container is no longer active within the cluster. + +The deprecated `--replace` flag still works, but is hidden from the `telepresence intercept` command help, and +will print a deprecation warning when used. 
+ + + + Add json-schema for the Telepresence Helm Chart + +Helm can validate a chart using a json-schema using the command `helm lint`, and this schema can be part of the actual Helm chart. The telepresence-oss Helm chart now includes such a schema, and a new `telepresence helm lint` command was added so that linting can be performed using the embedded chart. + + + + No dormant container present during replace. + +Telepresence will no longer inject a dormant container during a `telepresence replace` operation. Instead, the +Traffic Agent now directly serves as the replacement container, eliminating the need to forward traffic to the +original application container. This simplification offers several advantages when using the `--replace` flag: + + - **Removal of the init-container:** The need for a separate init-container is no longer necessary. + - **Elimination of port renames:** Port renames within the intercepted pod are no longer required. + + + + One single invocation of the Telepresence intercept command can now intercept multiple ports. + +It is now possible to intercept multiple ports with one single invocation of `telepresence intercept` by just repeating the `--port` flag. + + + + Unify how Traffic Manager selects namespaces + +The definition of what namespaces that a Traffic Manager would manage use was scattered into several Helm +chart values, such as `manager.Rbac.namespaces`, `client.Rbac.namespaces`, and +`agentInjector.webhook.namespaceSelector`. The definition is now unified to the mutual exclusive top-level +Helm chart values `namespaces` and `namespaceSelector`. + +The `namespaces` value is just for convenience and a short form of expressing: +```yaml +namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: in + values: . 
+``` + + + + Improved control over how remote volumes are mounted using mount policies + +Mount policies, that affects how the telepresence traffic-agent shares the pod's volumes, and also how the client will mount them, can now be provided using the Helm chart value `agent.mountPolicies` or as JSON object in the workload annotation `telepresence.io/mount-policies`. A mount policy is applied to a volume or to all paths matching a path-prefix (distinguished by checking if first character is a '/'), and can be one of `Ignore`, `Local`, `Remote`, or `RemoteReadOnly`. + + + + List output includes workload kind. + +The output of the `telepresence list` command will now include the workload kind (deployment, replicaset, statefulset, or rollout) in all entries. + + + + Add ability to override the default securityContext for the Telepresence init-container + +Users can now use the Helm value `agent.initSecurityContext` to override the default securityContext for the Telepresence init-container. + + + + Let download page use direct links to GitHub + +The download links on the release page now points directly to the assets on the download page, instead of using being routed from getambassador.io/download/tel2oss/releases. + + + + Use telepresence.io as annotation prefix instead of telepresence.getambassador.io + +The workload and pod annotations used by Telepresence will now use the prefix `telepresence.io` instead of `telepresence.getambassador.io`. The new prefix is consistent with the prefix used by labels, and it also matches the host name of the documentation site. Annotations using the old name will still work, but warnings will be logged when they are encountered. + + + + Make the DNS recursion check configurable and turn it off by default. + +Very few systems experience a DNS recursion lookup problem. It can only occur when the cluster runs locally and the cluster's DNS is configured to somehow use DNS server that is started by Telepresence. 
The check is therefore now configurable through the client setting `dns.recursionCheck`, and it is `false` by default. + + + + Trigger the mutating webhook with Kubernetes eviction objects instead of patching workloads. + +Telepresence will now attempt to evict pods in order to trigger the traffic-agent's injection or removal, and revert to patching workloads if evictions are prevented by the pod's disruption budget. This causes a slight change in the traffic-manager RBAC, as the traffic-manager must be able to create "pod/eviction" objects. + + + + The telepresence-agents configmap is no longer used. + +The traffic-agent configuration was moved into a pod-annotation. This avoids sync problems between the telepresence-agents (which is no no longer present) and the pods. + + + + Drop deprecated current-cluster-id command. + +The clusterID was deprecated some time ago, and replaced by the ID of the namespace where the traffic-manager is installed. + + + + Make telepresence connect --docker work with Rancher Desktop + +Rancher Desktop will start a K3s control-plane and typically expose the Kubernetes API server at `127.0.0.1:6443`. Telepresence can connect to this cluster when running on the host, but the address is not available when connecting in docker mode. +The problem is solved by ensuring that the Kubernetes API server address used when doing a `telepresence connect --docker` is swapped from 127.0.0.1 to the internal address of the control-plane node. This works because that address is available to other docker containers, and the Kubernetes API server is configured with a certificate that accepts it. + + + + Rename charts/telepresence to charts/telepresence-oss. + +The Helm chart name "telepresence-oss" was inconsistent with its contained folder "telepresence". As a result, attempts to install the chart using an argo ApplicationSet failed. The contained folder was renamed to match the chart name. 
+ + + + Conflict detection between namespaced and cluster-wide install. + +The namespace conflict detection mechanism would only discover conflicts between two _namespaced_ Traffic Managers trying to manage the same namespace. This is now fixed so that all types conflicts are discovered. + + + + Don't dispatch DNS discovery queries to the cluster. + +macOS based systems will often PTR queries using nameslike `b._dns-sd._udp`, lb._dns-sd._udp`, or `db-dns-sd._udp`. Those queries are no longer dispatched to the cluster. + + + + Using the --namespace option with telepresence causes a deadlock. + +Using `telepresence list --namespace ` with a namespace different from the one that telepresence was connected to, would cause a deadlock, and then produce an empty list. + + + + Fix problem with exclude-suffix being hidden by DNS search path. + +In some situations, a name ending with an exclude-suffix like "xyz.com" would be expanded by a search path into "xyz.com.<connected namespace>" and therefore not be excluded. Instead, the name was sent to the cluster to be resolved, causing an unnecessary load on its DNS server. + + +## Version 2.21.3 (February 6) + + Using the --proxy-via flag would sometimes cause connection timeouts. + +Typically, a `telepresence connect --proxy-via =` would fail with a "deadline exceeded" message when several workloads were present in the namespace, the one targeted by the proxy-via didn't yet have an agent installed, and other workloads had an agent. This was due to a race condition in the logic for the agent-based port-forwards in the root daemon. The conditions causing this race are now eliminated. + + + + Fix panic in root daemon when using the "allow conflicting subnets" feature on macOS. + +A regression was introduced in version 2.21.0, causing a panic due to an unimplemented method in the TUN-device on macOS based clients. + + + + Ensure that annotation enabled traffic-agents are uninstall when uninstalling the traffic-manager. 
+ +A traffic-agent injected because the workload had the inject annotation enabled would sometimes not get uninstalled when the traffic-manager was uninstalled. + + +## Version 2.21.2 (January 26) + + Fix panic when agentpf.client creates a Tunnel + +A race could occur where several requests where made to `agentpf.client.Tunnel` on a client that had errored when creating its port-forward to the agent. The implementation could handle one such requests but not several, resulting in a panic in situations where multiple simultaneous requests were made to the same client during a very short time period, + + + + Fix goroutine leak in dialer. + +The context passed to the `Tunnel` call that creates a stream for a dialer, was not cancelled when the dialer was finished, so the stream was never properly closed, leading to one dormant goroutine for each stream. + + +## Version 2.21.1 (December 17) + + Allow ingest of serverless deployments without specifying an inject-container-ports annotation + +The ability to intercept a workload without a service is built around the `telepresence.getambassador.io/inject-container-ports` annotation, and it was also required in order to ingest such a workload. This was counterintuitive and the requirement was removed. An ingest doesn't use a port. + + + + Upgrade module dependencies to get rid of critical vulnerability. + +Upgrade module dependencies to latest available stable. This includes upgrading golang.org/x/crypto, which had critical issues, from 0.30.0 to 0.31.0 where those issues are resolved. + + +## Version 2.21.0 (December 13) + + Automatic VPN conflict avoidance + +Telepresence not only detects subnet conflicts between the cluster and workstation VPNs but also resolves them by performing network address translation to move conflicting subnets out of the way. + + + + Virtual Address Translation (VNAT). + +It is now possible to use a virtual subnet without routing the affected IPs to a specific workload. 
A new `telepresence connect --vnat CIDR` flag was added that will perform virtual network address translation of cluster IPs. This flag is very similar to the `--proxy-via CIDR=WORKLOAD` introduced in 2.19, but without the need to specify a workload. + + + + Intercepts targeting a specific container + +In certain scenarios, the container owning the intercepted port differs from the container the intercept targets. This port owner's sole purpose is to route traffic from the service to the intended container, often using a direct localhost connection. +This update introduces a `--container ` option to the intercept command. While this option doesn't influence the port selection, it guarantees that the environment variables and mounts propagated to the client originate from the specified container. Additionally, if the `--replace` option is used, it ensures that this container is replaced. + + + + New telepresence ingest command + +The new `telepresence ingest` command, similar to `telepresence intercept`, provides local access to the volume mounts and environment variables of a targeted container. However, unlike `telepresence intercept`, `telepresence ingest` does not redirect traffic to the container and ensures that the mounted volumes are read-only. +An ingest requires a traffic-agent to be installed in the pods of the targeted workload. Beyond that, it's a client-side operation. This allows developers to have multiple simultaneous ingests on the same container. + + + + New telepresence curl command + +The new `telepresence curl` command runs curl from within a container. The command requires that a connection has been established using `telepresence connect --docker`, and the container that runs `curl` will share the same network as the containerized telepresence daemon. 
+ + + + New telepresence docker-run command + +The new `telepresence docker-run ` requires that a connection has been established using `telepresence connect --docker` It will perform a `docker run ` and add the flag necessary to ensure that started container shares the same network as the containerized telepresence daemon. + + + + Mount everything read-only during intercept + +It is now possible to append ":ro" to the intercept `--mount` flag value. This ensures that all remote volumes that the intercept mounts are read-only. + + + + Unify client configuration + +Previously, client configuration was divided between the config.yml file and a Kubernetes extension. DNS and routing settings were initially found only in the extension. However, the Helm client structure allowed entries from both. +To simplify this, we've now aligned the config.yml and Kubernetes extension with the Helm client structure. This means DNS and routing settings are now included in both. The Kubernetes extension takes precedence over the config.yml and Helm client object. +While the old-style Kubernetes extension is still supported for compatibility, it cannot be used with the new style. + + + + Use WebSockets for port-forward instead of the now deprecated SPDY. + +Telepresence will now use WebSockets instead of SPDY when creating port-forwards to the Kubernetes Cluster, and will fall back to SPDY when connecting to clusters that don't support SPDY. Use of the deprecated SPDY can be forced by setting `cluster.forceSPDY=true` in the `config.yml`. +See [Streaming Transitions from SPDY to WebSockets](https://kubernetes.io/blog/2024/08/20/websockets-transition/) for more information about this transition. + + + + Make usage data collection configurable using an extension point, and default to no-ops + +The OSS code-base will no longer report usage data to the proprietary collector at Ambassador Labs. 
The actual calls to the collector remain, but will be no-ops unless a proper collector client is installed using an extension point. + + + + Add deployments, statefulSets, replicaSets to workloads Helm chart value + +The Helm chart value `workloads` now supports the kinds `deployments.enabled`, `statefulSets.enabled`, `replicaSets.enabled`. and `rollouts.enabled`. All except `rollouts` are enabled by default. The traffic-manager will ignore workloads, and Telepresence will not be able to intercept them, if the `enabled` of the corresponding kind is set to `false`. + + + + Improved command auto-completion + +The auto-completion of namespaces, services, and containers have been added where appropriate, and the default file auto completion has been removed from most commands. + + + + Docker run flags --publish, --expose, and --network now work with docker mode connections + +After establishing a connection to a cluster using `telepresence connect --docker`, you can run new containers that share the same network as the containerized daemon that maintains the connection. This enables seamless communication between your local development environment and the remote services. +Normally, Docker has a limitation that prevents combining a shared network configuration with custom networks and exposing ports. However, Telepresence now elegantly circumvents this limitation so that a container started with `telepresence docker-run`, `telepresence intercept --docker-run`, or `telepresence ingest --docker-run` can use flags like `--network`, `--publish`, or `--expose`. +To achieve this, Telepresence temporarily adds the necessary network to the containerized daemon. This allows the new container to join the same network. Additionally, Telepresence starts extra socat containers to handle port mapping, ensuring that the desired ports are exposed to the local environment. 
+ + + + Prevent recursion in the Telepresence Virtual Network Interface (VIF) + +Network problems may arise when running Kubernetes locally (e.g., Docker Desktop, Kind, Minikube, k3s), because the VIF on the host is also accessible from the cluster's nodes. A request that isn't handled by a cluster resource might be routed back into the VIF and cause a recursion. +These recursions can now be prevented by setting the client configuration property `routing.recursionBlockDuration` so that new connection attempts are temporarily blocked for a specific IP:PORT pair immediately after an initial attempt, thereby effectively ending the recursion. + + + + Allow Helm chart to be included as a sub-chart + +The Helm chart previously had the unnecessary restriction that the .Release.Name under which telepresence is installed is literally called "traffic-manager". This restriction was preventing telepresence from being included as a sub-chart in a parent chart called anything but "traffic-manager". This restriction has been lifted. + + + + Add Windows arm64 client build + +Telepresence client is now available for Windows ARM64. Updated the release workflow files in github actions to build and publish the Windows ARM64 client. + + + + The --agents flag to telepresence uninstall is now the default. + +The `telepresence uninstall` was once capable of uninstalling the traffic-manager as well as traffic-agents. This behavior has been deprecated for some time now and in this release, the command is all about uninstalling the agents. Therefore the `--agents` flag was made redundant and whatever arguments that are given to the command must be name of workloads that have an agent installed unless the `--all-agents` is used, in which case no arguments are allowed. 
+ + + + Performance improvement for the telepresence list command + +The `telepresence list` command will now retrieve its data from the traffic-manager, which significantly improves its performance when used on namespaces that have a lot of workloads. + + + + During an intercept, the local port defaults to the targeted port of the intercepted container instead of 8080. + +Telepresence mimics the environment of a target container during an intercept, so it's only natural that the default for the local port is determined by the targeted container port rather than just defaulting to 8080. +A default can still be explicitly defined using the `config.intercept.defaultPort` setting. + + + + Move the telepresence-intercept-env configmap data into traffic-manager configmap. + +There's no need for two configmaps that store configuration data for the traffic manager. The traffic-manager configmap is also watched, so consolidating the configuration there saves some k8s API calls. + + + + Tracing was removed. + +The ability to collect trace has been removed along with the `telepresence gather-traces` and `telepresence upload-traces` commands. The underlying code was complex and has not been well maintained since its inception in 2022. We have received no feedback on it and seen no indication that it has ever been used. + + + + Remove obsolete code checking the Docker Bridge for DNS + +The DNS resolver checked the Docker bridge for messages on Linux. This code was obsolete and caused problems when running in Codespaces. + + + + Fix telepresence connect confusion caused by /.dockerenv file + +A `/.dockerenv` will be present when running in a GitHub Codespaces environment. That doesn't mean that telepresence cannot use docker, or that the root daemon shouldn't start. + + + + Cap timeouts.connectivityCheck at 5 seconds. 
+ +The timeout value of `timeouts.connectivityCheck` is used when checking if a cluster is already reachable without Telepresence setting up an additional network route. If it is, this timeout should be high enough to cover the delay when establishing a connection. If this delay is higher than a second, then chances are very low that the cluster already is reachable, and if it can, that all accesses to it will be very slow. In such cases, Telepresence will create its own network interface and do perform its own tunneling. +The default timeout for the check remains at 500 millisecond, which is more than sufficient for the majority of cases. + + + + Prevent that traffic-manager injects a traffic-agent into itself. + +The traffic-manager can never be a subject for an intercept, ingest, or proxy-via, because that means that it injects the traffic-agent into itself, and it is not designed to do that. A user attempting this will now see a meaningful error message. + + + + Don't include pods in the kube-system namespace when computing pod-subnets from pod IPs + +A user would normally never access pods in the `kube-system` namespace directly, and automatically including pods included there when computing the subnets will often lead to problems when running the cluster locally. This namespace is therefore now excluded in situations when the pod subnets are computed from the IPs of pods. Services in this namespace will still be available through the service subnet. +If a user should require the pod-subnet to be mapped, it can be added to the `client.routing.alsoProxy` list in the helm chart. + + + + Let routes belonging to an allowed conflict be added as a static route on Linux. + +The `allowConflicting` setting didn't always work on Linux because the conflicting subnet was just added as a link to the TUN device, and therefore didn't get subjected to routing rule used to assign priority to the given subnet. 
+ + +## Version 2.20.3 (November 18) + + Ensure that Telepresence works with GitHub Codespaces + +GitHub Codespaces runs in a container, but not as root. Telepresence didn't handle this situation correctly and only started the user daemon. The root daemon was never started. + + + + Mounts not working correctly when connected with --proxy-via + +A mount would try to connect to the sftp/ftp server using the original (cluster side) IP although that IP was translated into a virtual IP when using `--proxy-via`. + + +## Version 2.20.2 (October 21) + + Crash in traffic-manager configured with agentInjector.enabled=false + +A traffic-manager that was installed with the Helm value `agentInjector.enabled=false` crashed when a client used the commands `telepresence version` or `telepresence status`. Those commands would call a method on the traffic-manager that panicked if no traffic-agent was present. This method will now instead return the standard `Unavailable` error code, which is expected by the caller. + + +## Version 2.20.1 (October 10) + + Some workloads missing in the telepresence list output (typically replicasets owned by rollouts). + +Version 2.20.0 introduced a regression in the `telepresence list` command, resulting in the omission of all workloads that were owned by another workload. The correct behavior is to just omit those workloads that are owned by the supported workload kinds `Deployment`, `ReplicaSet`, `StatefulSet`, and `Rollout`. Furthermore, the `Rollout` kind must only be considered supported when the Argo Rollouts feature is enabled in the traffic-manager. + + + + Allow comma separated list of daemons for the gather-logs command. + +The name of the `telepresence gather-logs` flag `--daemons` suggests that the argument can contain more than one daemon, but prior to this fix, it couldn't. It is now possible to use a comma separated list, e.g. `telepresence gather-logs --daemons root,user`. 
+ + +## Version 2.20.0 (October 3) + + Add timestamp to telepresence_logs.zip filename. + +Telepresence is now capable of easily find telepresence gather-logs by certain timestamp. + + + + Enable intercepts of workloads that have no service. + +Telepresence is now capable of intercepting workloads that have no associated service. The intercept will then target container port instead of a service port. The new behavior is enabled by adding a telepresence.getambassador.io/inject-container-ports annotation where the value is a comma separated list of port identifiers consisting of either the name or the port number of a container port, optionally suffixed with `/TCP` or `/UDP`. + + + + Publish the OSS version of the telepresence Helm chart + +The OSS version of the telepresence helm chart is now available at ghcr.io/telepresenceio/telepresence-oss, and can be installed using the command:
helm install traffic-manager oci://ghcr.io/telepresenceio/telepresence-oss --namespace ambassador --version 2.20.0 The chart documentation is published at ArtifactHUB. + +
+ + Control the syntax of the environment file created with the intercept flag --env-file + +A new --env-syntax <syntax> was introduced to allow control over the syntax of the file created when using the intercept flag --env-file <file>. Valid syntaxes are "docker", "compose", "sh", "csh", "cmd", and "ps"; where "sh", "csh", and "ps" can be suffixed with ":export". + + + + Add support for Argo Rollout workloads. + +Telepresence now has an opt-in support for Argo Rollout workloads. The behavior is controlled by `workloads.argoRollouts.enabled` Helm chart value. It is recommended to set the following annotation telepresence.getambassador.io/inject-traffic-agent: enabled to avoid creation of unwanted revisions. + + + + Enable intercepts of containers that bind to podIP + +In previous versions, the traffic-agent would route traffic to localhost during periods when an intercept wasn't active. This made it impossible for an application to bind to the pod's IP, and it also meant that service meshes binding to the podIP would get bypassed, both during and after an intercept had been made. This is now changed, so that the traffic-agent instead forwards non intercepted requests to the pod's IP, thereby enabling the application to either bind to localhost or to that IP. + + + + Use ghcr.io/telepresenceio instead of docker.io/datawire for OSS images and the telemount Docker volume plugin. + +All OSS telepresence images and the telemount Docker plugin are now published at the public registry ghcr.io/telepresenceio and all references from the client and traffic-manager has been updated to use this registry instead of the one at docker.io/datawire. + + + + Use nftables instead of iptables-legacy + +Some time ago, we introduced iptables-legacy because users had problems using Telepresence with Fly.io where nftables wasn't supported by the kernel. Fly.io has since fixed this, so Telepresence will now use nftables again. 
This, in turn, ensures that modern systems that lack support for iptables-legacy will work. + + + + Root daemon wouldn't start when sudo timeout was zero. + +The root daemon refused to start when sudo was configured with a timestamp_timeout=0. This was due to logic that first requested root privileges using a sudo call, and then relied on these privileges being cached, so that a subsequent call using --non-interactive was guaranteed to succeed. This logic will now instead do one single sudo call, and rely solely on sudo to print an informative prompt and start the daemon in the background. + + + + Detect minikube network when connecting with --docker + +A telepresence connect --docker failed when attempting to connect to a minikube that uses a docker driver because the containerized daemon did not have access to the minikube docker network. Telepresence will now detect an attempt to connect to that network and attach it to the daemon container as needed. + + +## Version 2.19.1 (July 12) + + Add brew support for the OSS version of Telepresence. + +The Open-Source Software version of Telepresence can now be installed using the brew formula via brew install telepresenceio/telepresence/telepresence-oss. + + + + Add --create-namespace flag to the telepresence helm install command. + +A --create-namespace (default true) flag was added to the telepresence helm install command. No attempt will be made to create a namespace for the traffic-manager if it is explicitly set to false. The command will then fail if the namespace is missing. + + + + Introduce DNS fallback on Windows. + +A network.defaultDNSWithFallback config option has been introduced on Windows. It will cause the DNS-resolver to fall back to the resolver that was first in the list prior to when Telepresence establishes a connection. The option defaults to true since it is believed to give the best experience but can be set to false to restore the old behavior. 
+ + + + Brew now supports macOS (amd64/arm64) / Linux (amd64) + +The brew formula can now dynamically support macOS (amd64/arm64) / Linux (amd64) in a single formula. + + + + Add ability to provide an externally-provisioned webhook secret + +Added supplied as a new option for agentInjector.certificate.method. This fully disables the generation of the Mutating Webhook's secret, allowing the chart to use the values of a pre-existing secret named agentInjector.secret.name. Previously, the install would fail when it attempted to create or update the externally-managed secret. + + + + Let PTR query for DNS server return the cluster domain. + +The nslookup program on Windows uses a PTR query to retrieve its displayed "Server" property. The Telepresence DNS resolver will now return the cluster domain on such a query. + + + + Add scheduler name to pod templates. + +A new Helm chart value schedulerName has been added. With this feature, we are able to define some particular schedulers from Kubernetes to apply some different strategies to allocate telepresence resources, including the Traffic Manager and hooks pods. + + + + Race in traffic-agent injector when using inject annotation + +Applying multiple deployments that used the telepresence.getambassador.io/inject-traffic-agent: enabled would cause a race condition, resulting in a large number of created pods that eventually had to be deleted, or sometimes in pods that didn't contain a traffic agent. + + + + Fix configuring custom agent security context + +The traffic-manager helm chart will now correctly use a custom agent security context if one is provided. + + +## Version 2.19.0 (June 15) + + Warn when an Open Source Client connects to an Enterprise Traffic Manager. + +The difference between the OSS and the Enterprise offering is not well understood, and OSS users often install a traffic-manager using the Helm chart published at getambassador.io. 
This Helm chart installs an enterprise traffic-manager, which is probably not what the user would expect. Telepresence will now warn when an OSS client connects to an enterprise traffic-manager and suggest switching to an enterprise client, or use telepresence helm install to install an OSS traffic-manager. + + + + Add scheduler name to pod templates. + +A new Helm chart value schedulerName has been added. With this feature, we are able to define some particular schedulers from Kubernetes to apply some different strategies to allocate telepresence resources, including the Traffic Manager and hooks pods. + + + + Improve traffic-manager performance in very large clusters. + +The traffic-manager will now use a shared-informer when keeping track of deployments. This will significantly reduce the load on the Kubelet in large clusters and therefore lessen the risk of the traffic-manager being throttled, which can lead to other problems. + + + + Kubeconfig exec authentication failure when connecting with --docker from a WSL Linux host + +Clusters like Amazon EKS often use a special authentication binary that is declared in the kubeconfig using an exec authentication strategy. This binary is normally not available inside a container. Consequently, a modified kubeconfig is used when telepresence connect --docker executes, appointing a kubeauth binary which instead retrieves the authentication from a port on the Docker host that communicates with another process outside of Docker. This process then executes the original exec command to retrieve the necessary credentials. +This setup was problematic when using WSL, because even though telepresence connect --docker was executed on a Linux host, the Docker host available from host.docker.internal that the kubeauth connected to was the Windows host running Docker Desktop. The fix for this was to use the local IP of the default route instead of host.docker.internal when running under WSL. 
+ + + + Fix bug in workload cache, causing endless recursion when a workload uses the same name as its owner. + +The workload cache was keyed by name and namespace, but not by kind, so a workload named the same as its owner workload would be found using the same key. This led to the workload finding itself when looking up its owner, which in turn resulted in an endless recursion when searching for the topmost owner. + + + + FailedScheduling events mentioning node availability considered fatal when waiting for agent to arrive. + +The traffic-manager considers some events as fatal when waiting for a traffic-agent to arrive after an injection has been initiated. This logic would trigger on events like "Warning FailedScheduling 0/63 nodes are available" although those events indicate a recoverable condition and kill the wait. This is now fixed so that the events are logged but the wait continues. + + + + Improve how the traffic-manager resolves DNS when no agent is installed. + +The traffic-manager is typically installed into a namespace different from the one that clients are connected to. It's therefore important that the traffic-manager adds the client's namespace when resolving single label names in situations where there aren't any agents to dispatch the DNS query to. + + + + Removal of ability to import legacy artifacts into Helm. + +A helm install would make attempts to find manually installed artifacts and make them managed by Helm by adding the necessary labels and annotations. This was important when the Helm chart was first introduced but is far less so today, and this legacy import was therefore removed. + + + + Docker aliases deprecation caused failure to detect Kind cluster. + +The logic for detecting if a cluster is a local Kind cluster, and therefore needs some special attention when using telepresence connect --docker, relied on the presence of Aliases in the Docker network that a Kind cluster sets up. 
In Docker versions from 26 and up, this value is no longer used, but the corresponding info can instead be found in the new DNSNames field. + + + + Include svc as a top-level domain in the DNS resolver. + +It's not uncommon that use-cases involving Kafka or other middleware use FQNs that end with "svc". The core-DNS resolver in Kubernetes can resolve such names. With this bugfix, the Telepresence DNS resolver will also be able to resolve them, and thereby remove the need to add ".svc" to the include-suffix list. + + + + Add ability to enable/disable the mutating webhook. + +A new Helm chart boolean value agentInjector.enable has been added that controls the agent-injector service and its associated mutating webhook. If set to false, the service, the webhook, and the secrets and certificates associated with it, will no longer be installed. + + + + Add ability to mount a webhook secret. + +A new Helm chart value agentInjector.certificate.accessMethod which can be set to watch (the default) or mount has been added. The mount setting is intended for clusters with policies that prevent containers from doing a get, list or watch of a Secret, but where a latency of up to 90 seconds is acceptable between the time the secret is regenerated and the agent-injector picks it up. + + + + Make it possible to specify ignored volume mounts using path prefix. + +Volume mounts like /var/run/secrets/kubernetes.io are not declared in the workload. Instead, they are injected during pod-creation and their names are generated. It is now possible to ignore such mounts using a matching path prefix. + + + + Make the telemount Docker Volume plugin configurable + +A telemount object was added to the intercept object in config.yml (or Helm value client.intercept), so that the automatic download and installation of this plugin can be fully customised. + + + + Add option to load the kubeconfig yaml from stdin during connect. 
+ +This allows another process with a kubeconfig already loaded in memory to directly pass it to telepresence connect without needing a separate file. Simply use a dash "-" as the filename for the --kubeconfig flag. + + + + Add ability to specify agent security context. + +A new Helm chart value agent.securityContext has been added that allows configuring the security context of the injected traffic agent. The value can be set to a valid Kubernetes securityContext object, or can be set to an empty value ({}) to ensure the agent has no defined security context. If no value is specified, the traffic manager will set the agent's security context to the same as that of the first container of the workload being injected into. + + + + Tracing is no longer enabled by default. + +Tracing must now be enabled explicitly in order to use the telepresence gather-traces command. + + + + Removal of timeouts that are no longer in use + +The config.yml values timeouts.agentInstall and timeouts.apply haven't been in use since versions prior to 2.6.0, when the client was responsible for installing the traffic-agent. These timeouts are now removed from the code-base, and a warning will be printed when attempts are made to use them. + + + + Search all private subnets to find one open for dnsServerSubnet + +This resolves a bug that did not test all subnets in a private range, sometimes resulting in the warning, "DNS doesn't seem to work properly." + + + + Docker aliases deprecation caused failure to detect Kind cluster. + +The logic for detecting if a cluster is a local Kind cluster, and therefore needs some special attention when using telepresence connect --docker, relied on the presence of Aliases in the Docker network that a Kind cluster sets up. In Docker versions from 26 and up, this value is no longer used, but the corresponding info can instead be found in the new DNSNames field. + + + + Creation of individual pods was blocked by the agent-injector webhook. 
+ +An attempt to create a pod was blocked unless it was provided by a workload. Hence, commands like kubectl run -i busybox --rm --image=curlimages/curl --restart=Never -- curl echo-easy.default would be blocked from executing. + + + + Fix panic due to root daemon not running. + +If a telepresence connect was made at a time when the root daemon was not running (an abnormal condition) and a subsequent intercept was then made, a panic would occur when the port-forward to the agent was set up. This is now fixed so that the initial telepresence connect is refused unless the root daemon is running. + + + + Get rid of telemount plugin stickiness + +The datawire/telemount that is automatically downloaded and installed, would never be updated once the installation was made. Telepresence will now check for the latest release of the plugin and cache the result of that check for 24 hours. If a new version arrives, it will be installed and used. + + + + Use route instead of address for CIDRs with masks that don't allow "via" + +A CIDR with a mask that leaves less than two bits (/31 or /32 for IPv4) cannot be added as an address to the VIF, because such addresses must have bits allowing a "via" IP. +The logic was modified to allow such CIDRs to become static routes, using the VIF base address as their "via", rather than being VIF addresses in their own right. + + + + Containerized daemon created cache files owned by root + +When using telepresence connect --docker to create a containerized daemon, that daemon would sometimes create files in the cache that were owned by root, which then caused problems when connecting without the --docker flag. + + + + Remove large number of requests when traffic-manager is used in large clusters. + +The traffic-manager would make a very large number of API requests during cluster start-up or when many services were changed for other reasons. The logic that did this was refactored and the number of queries were significantly reduced. 
+ + + + Don't patch probes on replaced containers. + +A container that is being replaced by a telepresence intercept --replace invocation will have no liveness-, readiness-, or startup-probes. Telepresence didn't take this into consideration when injecting the traffic-agent, but now it will refrain from patching symbolic port names of those probes. + + + + Don't rely on context name when deciding if a kind cluster is used. + +The code that auto-patches the kubeconfig when connecting to a kind cluster from within a docker container, relied on the context name starting with "kind-", but although all contexts created by kind have that name, the user is still free to rename it or to create other contexts using the same connection properties. The logic was therefore changed to instead look for a loopback service address. + + +## Version 2.18.0 (February 9) + + Include the image for the traffic-agent in the output of the version and status commands. + +The version and status commands will now output the image that the traffic-agent will be using when injected by the agent-injector. + + + + Custom DNS using the client DNS resolver. + +

A new telepresence connect --proxy-via CIDR=WORKLOAD flag was introduced, allowing Telepresence to translate DNS responses matching specific subnets into virtual IPs that are used locally. Those virtual IPs are then routed (with reverse translation) via the pods of a given workload. This makes it possible to handle custom DNS servers that resolve domains into loopback IPs. The flag may also be used in cases where the cluster's subnets are in conflict with the workstation's VPN.

The CIDR can also be a symbolic name that identifies a subnet or list of subnets:

- `also` — All subnets added with `--also-proxy`
- `service` — The cluster's service subnet
- `pods` — The cluster's pod subnets.
- `all` — All of the above.
+ +
+ + Ensure that agent.appProtocolStrategy is propagated correctly. + +The agent.appProtocolStrategy was inadvertently dropped when moving license-related code from the OSS repository to the repository for the Enterprise version of Telepresence. It has now been restored. + + + + Include non-default zero values in output of telepresence config view. + +The telepresence config view command will now print zero values in the output when the default for the value is non-zero. + + + + Restore ability to run the telepresence CLI in a docker container. + +The improvements made to be able to run the telepresence daemon in docker using telepresence connect --docker made it impossible to run both the CLI and the daemon in docker. This commit fixes that and also ensures that the user- and root-daemons are merged in this scenario when the container runs as root. + + + + Remote mounts when intercepting with the --replace flag. + +A telepresence intercept --replace did not correctly mount all volumes, because when the intercepted container was removed, its mounts were no longer visible to the agent-injector when it was subjected to a second invocation. The container is now kept in place, but with an image that just sleeps infinitely. + + + + Intercepting with the --replace flag will no longer require all subsequent intercepts to use --replace. + +A telepresence intercept --replace will no longer switch the mode of the intercepted workload, forcing all subsequent intercepts on that workload to use --replace until the agent is uninstalled. Instead, --replace can be used interchangeably just like any other intercept flag. + + + + Kubeconfig exec authentication with context names containing colon didn't work on Windows + +The logic added to allow the root daemon to connect directly to the cluster using the user daemon as a proxy for exec type authentication in the kube-config, didn't take into account that a context name sometimes contains the colon ":" character. 
That character cannot be used in filenames on Windows because it is the drive letter separator. + + + + Provide agent name and tag as separate values in Helm chart + +The AGENT_IMAGE was a concatenation of the agent's name and tag. This is now changed so that the env instead contains an AGENT_IMAGE_NAME and AGENT_IMAGE_TAG. The AGENT_IMAGE is removed. Also, a new env REGISTRY is added, where the registry of the traffic-manager image is provided. The AGENT_REGISTRY is no longer required and will default to REGISTRY if not set. + + + + Environment interpolation expressions were prefixed twice. + +Telepresence would sometimes prefix environment interpolation expressions in the traffic-agent twice so that an expression that looked like $(SOME_NAME) in the app-container, ended up as $(_TEL_APP_A__TEL_APP_A_SOME_NAME) in the corresponding expression in the traffic-agent. + + + + Panic in root-daemon on darwin workstations with full access to cluster network. + +A darwin machine with full access to the cluster's subnets will never create a TUN-device, and a check was missing for whether the device actually existed, which caused a panic in the root daemon. + + + + Show allow-conflicting-subnets in telepresence status and telepresence config view. + +The telepresence status and telepresence config view commands didn't show the allowConflictingSubnets CIDRs because the value wasn't propagated correctly to the CLI. + + + + It is now possible to use a host-based connection and containerized connections simultaneously. + +Only one host-based connection can exist because that connection will alter the DNS to reflect the namespace of the connection, but it's now possible to create additional connections using --docker while retaining the host-based connection. + + + + Ability to set the hostname of a containerized daemon. + +The hostname of a containerized daemon defaults to the container's ID in Docker. 
You can now override the hostname using telepresence connect --docker --hostname <a name>. + + + + New <code>--multi-daemon</code> flag to enforce a consistent structure for the status command output. + +The output of the telepresence status when using --output json or --output yaml will either show an object where the user_daemon and root_daemon are top level elements, or when multiple connections are used, an object where a connections list contains objects with those daemons. The flag --multi-daemon will enforce the latter structure even when only one daemon is connected so that the output can be parsed consistently. The reason for keeping the former structure is to retain backward compatibility with existing parsers. + + + + Make output from telepresence quit more consistent. + +A quit (without -s) just disconnects the host user and root daemons but will quit a container based daemon. The message printed was simplified to remove some have/has is/are errors caused by the difference. + + + + Fix "tls: bad certificate" errors when refreshing the mutator-webhook secret + +The agent-injector service will now refresh the secret used by the mutator-webhook each time a new connection is established, thus preventing the certificates from going out-of-sync when the secret is regenerated. + + + + Keep telepresence-agents configmap in sync with pod states. + +An intercept attempt that resulted in a timeout due to failure of injecting the traffic-agent left the telepresence-agents configmap in a state that indicated that an agent had been added, which caused problems for subsequent intercepts after the problem causing the first failure had been fixed. + + + + The <code>telepresence status</code> command will now report the status of all running daemons. + +A telepresence status, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". 
This is now fixed so that the command instead reports the status of all running daemons. + + + + The <code>telepresence version</code> command will now report the version of all running daemons. + +A telepresence version, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead reports the version of all running daemons. + + + + Multiple containerized daemons can now be disconnected using <code>telepresence quit -s</code> + +A telepresence quit -s, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead quits all daemons. + + + + The DNS search path on Windows is now restored when Telepresence quits + +The DNS search path that Telepresence uses to simulate the DNS lookup functionality in the connected cluster namespace was not removed by a telepresence quit, resulting in connectivity problems from the workstation. Telepresence will now remove the entries that it has added to the search list when it quits. + + + + The user-daemon would sometimes get killed when used by multiple simultaneous CLI clients. + +The user-daemon would die with a fatal "fatal error: concurrent map writes" error in the connector.log, effectively killing the ongoing connection. + + + + Multiple services ports using the same target port would not get intercepted correctly. + +Intercepts didn't work when multiple service ports were using the same container port. Telepresence would think that one of the ports wasn't intercepted and therefore disable the intercept of the container port. + + + + Root daemon refuses to disconnect. + +The root daemon would sometimes hang forever when attempting to disconnect due to a deadlock in the VIF-device. 
+ + + + Fix panic in user daemon when traffic-manager was unreachable + +The user daemon would panic if the traffic-manager was unreachable. It will now instead report a proper error to the client. + + + + Removal of backward support for versions predating 2.6.0 + +The telepresence helm installer will no longer discover and convert workloads that were modified by versions prior to 2.6.0. The traffic manager will no longer support the muxed tunnels used in versions prior to 2.5.0. + + +## Version 2.17.0 (November 14) + + Additional Prometheus metrics to track intercept/connect activity + +This feature adds the following metrics to the Prometheus endpoint: connect_count, connect_active_status, intercept_count, and intercept_active_status. These are labeled by client/install_id. Additionally, the intercept_count metric has been renamed to active_intercept_count for clarity. + + + + Make the Telepresence client docker image configurable. + +The docker image used when running a Telepresence intercept in docker mode can now be configured using the setting images.clientImage and will default first to the value of the environment variable TELEPRESENCE_CLIENT_IMAGE, and then to the value preset by the telepresence binary. This configuration setting is primarily intended for testing purposes. + + + + Use traffic-agent port-forwards for outbound and intercepted traffic. + +The telepresence TUN-device is now capable of establishing direct port-forwards to a traffic-agent in the connected namespace. That port-forward is then used for all outbound traffic to the device, and also for all traffic that arrives from intercepted workloads. Getting rid of the extra hop via the traffic-manager improves performance and reduces the load on the traffic-manager. The feature can only be used if the client has Kubernetes port-forward permissions to the connected namespace. It can be disabled by setting cluster.agentPortForward to false in config.yml. + + + + Improve outbound traffic performance. 
+ +The root-daemon now communicates directly with the traffic-manager instead of routing all outbound traffic through the user-daemon. The root-daemon uses a patched kubeconfig where exec configurations to obtain credentials are dispatched to the user-daemon. This is to ensure that all authentication plugins will execute in user-space. The old behavior of routing everything through the user-daemon can be restored by setting cluster.connectFromRootDaemon to false in config.yml. + + + + New networking CLI flag --allow-conflicting-subnets + +telepresence connect (and other commands that kick off a connect) now accepts an --allow-conflicting-subnets CLI flag. This is equivalent to client.routing.allowConflictingSubnets in the helm chart, but can be specified at connect time. It will be appended to any configuration pushed from the traffic manager. + + + + Warn if large version mismatch between traffic manager and client. + +Print a warning if the minor version diff between the client and the traffic manager is greater than three. + + + + The authenticator binary was removed from the docker image. + +The authenticator binary, used when serving proxied exec kubeconfig credential retrieval, has been removed. The functionality was instead added as a subcommand to the telepresence binary. + + +## Version 2.16.1 (October 12) + + Add --docker-debug flag to the telepresence intercept command. + +This flag is similar to --docker-build but will start the container with more relaxed security using the docker run flags --security-opt apparmor=unconfined --cap-add SYS_PTRACE. + + + + Add a --expose option to the telepresence connect command. + +In some situations it is necessary to make some ports available to the host from a containerized telepresence daemon. This commit adds a repeatable --expose <docker port exposure> flag to the connect command. + + + + Prevent agent-injector webhook from selecting from kube-xxx namespaces. 
+ +The kube-system and kube-node-lease namespaces should not be affected by a global agent-injector webhook by default. A default namespaceSelector was therefore added to the Helm Chart agentInjector.webhook that contains a NotIn preventing those namespaces from being selected. + + + + Backward compatibility for pod template TLS annotations. + +Users of Telepresence < 2.9.0 that make use of the pod template TLS annotations were unable to upgrade because the annotation names have changed (now prefixed by "telepresence."), and the environment expansion of the annotation values was dropped. This fix restores support for the old names (while retaining the new ones) and the environment expansion. + + + + Built with go 1.21.3 + +Built Telepresence with go 1.21.3 to address CVEs. + + + + Match service selector against pod template labels + +When listing intercepts (typically by calling telepresence list) selectors of services are matched against workloads. Previously the match was made against the labels of the workload, but now they are matched against the labels of the pod template of the workload. Since the service would actually be matched against pods this is more correct. The most common case when this makes a difference is that statefulsets are now listed when they should. + + +## Version 2.16.0 (October 2) + + The helm sub-commands will no longer start the user daemon. + +The telepresence helm install/upgrade/uninstall commands will no longer start the telepresence user daemon because there's no need to connect to the traffic-manager in order for them to execute. + + + + Routing table race condition + +A race condition would sometimes occur when a Telepresence TUN device was deleted and another created in rapid succession that caused the routing table to reference interfaces that no longer existed. 
+ + + + Stop lingering daemon container + +When using telepresence connect --docker, a lingering container could be present, causing errors like "The container name NN is already in use by container XX ...". When this happens, the connect logic will now give the container some time to stop and then call docker stop NN to stop it before retrying to start it. + + + + Add file locking to the Telepresence cache + +Files in the Telepresence cache are accessed by multiple processes. The processes will now use advisory locks on the files to guarantee consistency. + + + + Lock connection to namespace + +The behavior changed so that a connected Telepresence client is bound to a namespace. The namespace can then not be changed unless the client disconnects and reconnects. A connection is also given a name. The default name is composed from <kube context name>-<namespace> but can be given explicitly when connecting using --name. The connection can optionally be identified using the option --use <name match> (only needed when docker is used and more than one connection is active). + + + + Deprecation of global --context and --docker flags. + +The global flags --context and --docker will now be considered deprecated unless used with commands that accept the full set of Kubernetes flags (e.g. telepresence connect). + + + + Deprecation of the --namespace flag for the intercept command. + +The --namespace flag is now deprecated for the telepresence intercept command. The flag can instead be used with all commands that accept the full set of Kubernetes flags (e.g. telepresence connect). + + + + Legacy code predating version 2.6.0 was removed. + +The telepresence code-base still contained a lot of code that would modify workloads instead of relying on the mutating webhook installer when a traffic-manager version predating version 2.6.0 was discovered. This code has now been removed. 
+ + + + Add `telepresence list-namespaces` and `telepresence list-contexts` commands + +These commands can be used to check accessible namespaces and for automation. + + + + Implicit connect warning + +A deprecation warning will be printed if a command other than telepresence connect causes an implicit connect to happen. Implicit connects will be removed in a future release. + + +## Version 2.15.1 (September 6) + + Rebuild with go 1.21.1 + +Rebuild Telepresence with go 1.21.1 to address CVEs. + + + + Set security context for traffic agent + +Openshift users reported that the traffic agent injection was failing due to a missing security context. + + +## Version 2.15.0 (August 29) + + Add ASLR to telepresence binaries + +ASLR hardens binary security against fixed memory attacks. + + + + Added client builds for arm64 architecture. + +Updated the release workflow files in GitHub Actions to include building and publishing the client binaries for arm64 architecture. + + + + KUBECONFIG env var can now be used with the docker mode. + +If provided, the KUBECONFIG environment variable was passed to the kubeauth-foreground service as a parameter. However, since it didn't exist, the CLI was throwing an error when using telepresence connect --docker. + + + + Fix deadlock while watching workloads + +The telepresence list --output json-stream wasn't releasing the session's lock after being stopped, including with a telepresence quit. The user could be blocked as a result. + + + + Change json output of telepresence list command + +Replace deprecated info in the JSON output of the telepresence list command. + + +## Version 2.14.4 (August 21) + + Nil pointer exception when upgrading the traffic-manager. 
+ +Upgrading the traffic-manager using telepresence helm upgrade would sometimes result in a helm error message executing "telepresence/templates/intercept-env-configmap.yaml" at <.Values.intercept.environment.excluded>: nil pointer evaluating interface {}.excluded" + + +## Version 2.14.2 (July 26) + + Telepresence now uses the OSS agent in its latest version by default. + +The traffic manager admin was forced to set it manually during the chart installation. + + +## Version 2.14.1 (July 7) + + Envoy's http idle timeout is now configurable. + +A new agent.helm.httpIdleTimeout setting was added to the Helm chart that controls the proprietary Traffic agent's http idle timeout. The default of one hour, which in some situations would cause a lot of resource-consuming and lingering connections, was changed to 70 seconds. + + + + Add more gauges to the Traffic manager's Prometheus client. + +Several gauges were added to the Prometheus client to make it easier to monitor what the Traffic manager spends resources on. + + + + Agent Pull Policy + +Add option to set traffic agent pull policy in helm chart. + + + + Resource leak in the Traffic manager. + +Fixes a resource leak in the Traffic manager caused by lingering tunnels between the clients and Traffic agents. The tunnels are now closed correctly when terminated from the side that created them. + + + + Fixed problem setting traffic manager namespace using the kubeconfig extension. + +Fixes a regression introduced in version 2.10.5, making it impossible to set the traffic-manager namespace using the telepresence.io kubeconfig extension. + + +## Version 2.14.0 (June 12) + + DNS configuration now supports excludes and mappings. + +The DNS configuration now supports two new fields, excludes and mappings. The excludes field allows you to exclude a given list of hostnames from resolution, while the mappings field can be used to resolve a hostname with another. 
+ + + + Added the ability to exclude environment variables + +Added a new config map that can take an array of environment variables that will then be excluded from an intercept that retrieves the environment of a pod. + + + + Fixed traffic-agent backward incompatibility issue causing lack of remote mounts + +A traffic-agent of version 2.13.3 (or 1.13.15) would not propagate the directories under /var/run/secrets when used with a traffic manager older than 2.13.3. + + + + Fixed race condition causing segfaults on rare occasions when a tunnel stream timed out. + +A context cancellation could sometimes be trapped in a stream reader, causing it to incorrectly return an undefined message which in turn caused the parent reader to panic on a nil pointer reference. + + + + Routing conflict reporting. + +Telepresence will now attempt to detect and report routing conflicts with other running VPN software on client machines. There is a new configuration flag that can be tweaked to allow certain CIDRs to be overridden by Telepresence. + + + + test-vpn command deprecated + +Running telepresence test-vpn will now print a deprecation warning and exit. The command will be removed in a future release. Instead, please configure telepresence for your VPN's routes. + + +## Version 2.13.3 (May 25) + + Add imagePullSecrets to hooks + +Add .Values.hooks.curl.imagePullSecrets and .Values.hooks.curl.imagePullSecrets to Helm values. + + + + Change reinvocation policy to IfNeeded for the mutating webhook + +The default setting of the reinvocationPolicy for the mutating webhook dealing with agent injections changed from Never to IfNeeded. + + + + Fix mounting fail of IAM roles for service accounts web identity token + +The eks.amazonaws.com/serviceaccount volume injected by EKS is now exported and remotely mounted during an intercept. 
+ + + + Correct namespace selector for cluster versions with non-numeric characters + +The mutating webhook now correctly applies the namespace selector even if the cluster version contains non-numeric characters. For example, it can now handle versions such as Major:"1", Minor:"22+". + + + + Enable IPv6 on the telepresence docker network + +The "telepresence" Docker network will now propagate DNS AAAA queries to the Telepresence DNS resolver when it runs in a Docker container. + + + + Fix the crash when intercepting with --local-only and --docker-run + +Running telepresence intercept --local-only --docker-run no longer results in a panic. + + + + Fix incorrect error message with local-only mounts + +Running telepresence intercept --local-only --mount false no longer results in an incorrect error message saying "a local-only intercept cannot have mounts". + + + + specify port in hook urls + +The helm chart now correctly handles custom agentInjector.webhook.port that was not being set in hook URLs. + + + + Fix wrong default value for disableGlobal and agentArrival + +Params .intercept.disableGlobal and .timeouts.agentArrival are now correctly honored. + + diff --git a/versioned_docs/version-2.22/troubleshooting.md b/versioned_docs/version-2.22/troubleshooting.md new file mode 100644 index 00000000..84c0c610 --- /dev/null +++ b/versioned_docs/version-2.22/troubleshooting.md @@ -0,0 +1,152 @@ +--- +title: Troubleshooting +description: "Learn how to troubleshoot common issues related to Telepresence, including intercept issues, cluster connection issues, and errors related to Ambassador Cloud." +--- + +# Troubleshooting + +## Connecting to a cluster via VPN doesn't work. + +There are a few different issues that could arise when working with a VPN. Please see the [dedicated page](reference/vpn.md) on Telepresence and VPNs to learn more on how to fix these. 
+ +## Connecting to a cluster hosted in a Docker Container or a VM on the workstation doesn't work + +The cluster probably has access to the host's network and gets confused when it is mapped by Telepresence. +Please check the [cluster in hosted container or vm](howtos/cluster-in-vm.md) for more details. + +## Volume mounts are not working on macOS + +It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. Lately there's been some issues using `brew install sshfs` on a macOS workstation because the required component `osxfuse` (now named `macfuse`) isn't open source and hence, no longer supported. As a workaround, you can now use `gromgit/fuse/sshfs-mac` instead. Follow these steps: + +1. Remove old sshfs, macfuse, osxfuse using `brew uninstall` +2. `brew install --cask macfuse` +3. `brew install gromgit/fuse/sshfs-mac` +4. `brew link --overwrite sshfs-mac` + +Now sshfs -V shows you the correct version, e.g.: +``` +$ sshfs -V +SSHFS version 2.10 +FUSE library version: 2.9.9 +fuse: no mount point +``` + +5. Next, try a mount (or a replace/ingest/intercept that performs a mount). It will fail because you need to give permission to “Benjamin Fleischer” to execute a kernel extension (a pop-up appears that takes you to the system preferences). +6. Approve the needed permission +7. Reboot your computer. + +## Volume mounts are not working on Linux +It's necessary to have `sshfs` installed in order for volume mounts to work correctly when Telepresence engages with remote containers. + +After you've installed `sshfs`, if mounts still aren't working: +1. Uncomment `user_allow_other` in `/etc/fuse.conf` +2. Add your user to the "fuse" group with: `sudo usermod -a -G fuse <your username>` +3. Restart your computer after uncommenting `user_allow_other` + +## DNS is broken on macOS + +Commands like `dig` cannot find cluster resources even though Telepresence is connected to the cluster, but it works +with `curl`. 
+ +This is because `dig`, and some other utilities on macOS have their own built-in DNS client which bypasses the macOS +native DNS system and uses the libc resolver directly. Here's an excerpt from the `dig` command's man-page: +> Mac OS X NOTICE +> +> The nslookup command does not use the host name and address resolution or the DNS query routing +> mechanisms used by other processes running on Mac OS X. The results of name or address queries +> printed by nslookup may differ from those found by other processes that use the Mac OS X native +> name and address resolution mechanisms. The results of DNS queries may also differ from queries +> that use the Mac OS X DNS routing library. + +A command that should always work is: +```console +$ dscacheutil -q host -a name <hostname> +``` + +## No Sidecar Injected in GKE private clusters + +An attempt to `telepresence intercept` results in a timeout, and upon examination of the pods (`kubectl get pods`) it's discovered that the intercept command did not inject a sidecar into the workload's pods: + +```bash +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +echo-easy-7f6d54cff8-rz44k 1/1 Running 0 5m5s + +$ telepresence intercept echo-easy -p 8080 +telepresence: error: connector.CreateIntercept: request timed out while waiting for agent echo-easy.default to arrive +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +echo-easy-d8dc4cc7c-27567 1/1 Running 0 2m9s + +# Notice how 1/1 containers are ready. +``` + +If this is occurring in a GKE cluster with private networking enabled, it is likely due to firewall rules blocking the +Traffic Manager's webhook injector from the API server. +To fix this, add a firewall rule allowing your cluster's master nodes to access TCP port `443` in your cluster's pods, +or change the port number that Telepresence is using for the agent injector by providing the number of an allowed port +using the Helm chart value `agentInjector.webhook.port`. 
+Please refer to the [telepresence install instructions](install/cloud#gke) or the [GCP docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) for information to resolve this. + +## Injected init-container doesn't function properly + +The init-container is injected to insert `iptables` rules that redirect port numbers from the app container to the +traffic-agent sidecar. This is necessary when the service's `targetPort` is numeric. It requires elevated privileges +(`NET_ADMIN` capabilities), and the inserted rules may get overridden by `iptables` rules inserted by other vendors, +such as Istio or Linkerd. + +Injection of the init-container can often be avoided by using a `targetPort` _name_ instead of a number, and ensuring +that the corresponding container's `containerPort` is also named. This example uses the name "http", but any valid +name will do: +```yaml +apiVersion: v1 +kind: Pod +metadata: + ... +spec: + containers: + - ports: + - name: http + containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + ... +spec: + ports: + - port: 80 + targetPort: http +``` + +Telepresence injects an init-container into the pods of a workload, only if at least one service specifies a numeric +`targetPort` that references a `containerPort` in the workload. When this isn't the case, it will instead do the +following during the injection of the traffic-agent: + +1. Rename the designated container's port by prefixing it (i.e., containerPort: http becomes containerPort: tm-http). +2. Let the container port of our injected traffic-agent use the original name (i.e., containerPort: http). + +Kubernetes takes care of the rest and will now associate the service's `targetPort` with our traffic-agent's +`containerPort`. + +> [!IMPORTANT] +> If the service is "headless" (using `ClusterIP: None`), then using named ports won't help because the `targetPort` will +> not get remapped. 
A headless service will always require the init-container. + +## EKS, Calico, and Traffic Agent injection timeouts + +When using EKS with Calico CNI, the Kubernetes API server cannot reach the mutating webhook +used for triggering the traffic agent injection. To solve this problem, try providing the +Helm chart value `"hostNetwork=true"` when installing or upgrading the traffic-manager. + +More information can be found in this [blog post](https://medium.com/@denisstortisilva/kubernetes-eks-calico-and-custom-admission-webhooks-a2956b49bd0d). + +## Error connecting to GKE or EKS cluster + +GKE and EKS require a plugin that utilizes their respective IAM providers. +You will need to install the [gke](install/cloud#gke-authentication-plugin) or [eks](install/cloud#eks-authentication-plugin) plugins +for Telepresence to connect to your cluster. + +## `too many files open` error when running `telepresence connect` on Linux + +If `telepresence connect` on Linux fails with a message in the logs `too many files open`, then check if `fs.inotify.max_user_instances` is set too low. Check the current settings with `sysctl fs.inotify.max_user_instances` and increase it temporarily with `sudo sysctl -w fs.inotify.max_user_instances=512`. For more information about permanently increasing it see [Kernel inotify watch limit reached](https://unix.stackexchange.com/a/13757/514457). diff --git a/versioned_docs/version-2.22/variables.yml b/versioned_docs/version-2.22/variables.yml new file mode 100644 index 00000000..97c94fde --- /dev/null +++ b/versioned_docs/version-2.22/variables.yml @@ -0,0 +1,2 @@ +version: "2.22.0" +dlVersion: "v2.22.0" diff --git a/versioned_sidebars/version-2.22-sidebars.json b/versioned_sidebars/version-2.22-sidebars.json new file mode 100644 index 00000000..cff0c94e --- /dev/null +++ b/versioned_sidebars/version-2.22-sidebars.json @@ -0,0 +1,8 @@ +{ + "defaultSidebar": [ + { + "type": "autogenerated", + "dirName": "." 
+ } + ] +} diff --git a/versions.json b/versions.json index 9b3422b7..05edc465 100644 --- a/versions.json +++ b/versions.json @@ -1,4 +1,5 @@ [ + "2.22", "2.21", "2.20", "2.19" diff --git a/yarn.lock b/yarn.lock index e46131ae..81d9835c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7,32 +7,32 @@ resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.3.tgz#90749bde8b89cd41764224f5aac29cd4138f75ff" integrity sha512-rE0Pygv0sEZ4vBWHlAgJLGDU7Pm8xoO6p3wsEceb7GYAjScrOHpEo8KK/eVkAcnSM+slAEtXjA2JpdjLp4fJQQ== -"@algolia/autocomplete-core@1.17.7": - version "1.17.7" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.17.7.tgz#2c410baa94a47c5c5f56ed712bb4a00ebe24088b" - integrity sha512-BjiPOW6ks90UKl7TwMv7oNQMnzU+t/wk9mgIDi6b1tXpUek7MW0lbNOUHpvam9pe3lVCf4xPFT+lK7s+e+fs7Q== +"@algolia/autocomplete-core@1.17.9": + version "1.17.9" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.17.9.tgz#83374c47dc72482aa45d6b953e89377047f0dcdc" + integrity sha512-O7BxrpLDPJWWHv/DLA9DRFWs+iY1uOJZkqUwjS5HSZAGcl0hIVCQ97LTLewiZmZ402JYUrun+8NqFP+hCknlbQ== dependencies: - "@algolia/autocomplete-plugin-algolia-insights" "1.17.7" - "@algolia/autocomplete-shared" "1.17.7" + "@algolia/autocomplete-plugin-algolia-insights" "1.17.9" + "@algolia/autocomplete-shared" "1.17.9" -"@algolia/autocomplete-plugin-algolia-insights@1.17.7": - version "1.17.7" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.7.tgz#7d2b105f84e7dd8f0370aa4c4ab3b704e6760d82" - integrity sha512-Jca5Ude6yUOuyzjnz57og7Et3aXjbwCSDf/8onLHSQgw1qW3ALl9mrMWaXb5FmPVkV3EtkD2F/+NkT6VHyPu9A== +"@algolia/autocomplete-plugin-algolia-insights@1.17.9": + version "1.17.9" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.9.tgz#74c86024d09d09e8bfa3dd90b844b77d9f9947b6" + integrity 
sha512-u1fEHkCbWF92DBeB/KHeMacsjsoI0wFhjZtlCq2ddZbAehshbZST6Hs0Avkc0s+4UyBGbMDnSuXHLuvRWK5iDQ== dependencies: - "@algolia/autocomplete-shared" "1.17.7" + "@algolia/autocomplete-shared" "1.17.9" -"@algolia/autocomplete-preset-algolia@1.17.7": - version "1.17.7" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.7.tgz#c9badc0d73d62db5bf565d839d94ec0034680ae9" - integrity sha512-ggOQ950+nwbWROq2MOCIL71RE0DdQZsceqrg32UqnhDz8FlO9rL8ONHNsI2R1MH0tkgVIDKI/D0sMiUchsFdWA== +"@algolia/autocomplete-preset-algolia@1.17.9": + version "1.17.9" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.9.tgz#911f3250544eb8ea4096fcfb268f156b085321b5" + integrity sha512-Na1OuceSJeg8j7ZWn5ssMu/Ax3amtOwk76u4h5J4eK2Nx2KB5qt0Z4cOapCsxot9VcEN11ADV5aUSlQF4RhGjQ== dependencies: - "@algolia/autocomplete-shared" "1.17.7" + "@algolia/autocomplete-shared" "1.17.9" -"@algolia/autocomplete-shared@1.17.7": - version "1.17.7" - resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.7.tgz#105e84ad9d1a31d3fb86ba20dc890eefe1a313a0" - integrity sha512-o/1Vurr42U/qskRSuhBH+VKxMvkkUVTLU6WZQr+L5lGZZLYWyhdzWjW0iGXY7EkwRTjBqvN2EsR81yCTGV/kmg== +"@algolia/autocomplete-shared@1.17.9": + version "1.17.9" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.9.tgz#5f38868f7cb1d54b014b17a10fc4f7e79d427fa8" + integrity sha512-iDf05JDQ7I0b7JEA/9IektxN/80a2MZ1ToohfmNS3rfeuQnIKI3IJlIafD0xu4StbtQTghx9T3Maa97ytkXenQ== "@algolia/cache-browser-local-storage@4.24.0": version "4.24.0" @@ -53,15 +53,15 @@ dependencies: "@algolia/cache-common" "4.24.0" -"@algolia/client-abtesting@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-abtesting/-/client-abtesting-5.15.0.tgz#6414895e2246dc7b7facd97bd98c3abe13cabe59" - integrity 
sha512-FaEM40iuiv1mAipYyiptP4EyxkJ8qHfowCpEeusdHUC4C7spATJYArD2rX3AxkVeREkDIgYEOuXcwKUbDCr7Nw== +"@algolia/client-abtesting@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-abtesting/-/client-abtesting-5.21.0.tgz#565c79275769c614ecf75bd8679dbd510a0c88c1" + integrity sha512-I239aSmXa3pXDhp3AWGaIfesqJBNFA7drUM8SIfNxMIzvQXUnHRf4rW1o77QXLI/nIClNsb8KOLaB62gO9LnlQ== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/client-account@4.24.0": version "4.24.0" @@ -82,15 +82,15 @@ "@algolia/requester-common" "4.24.0" "@algolia/transporter" "4.24.0" -"@algolia/client-analytics@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-5.15.0.tgz#7ca1043cba7ac225d30e8bb52579504946b95f58" - integrity sha512-lho0gTFsQDIdCwyUKTtMuf9nCLwq9jOGlLGIeQGKDxXF7HbiAysFIu5QW/iQr1LzMgDyM9NH7K98KY+BiIFriQ== +"@algolia/client-analytics@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-5.21.0.tgz#4c4863b3cb7380de5bd1ba82691516e0a60ad167" + integrity sha512-OxoUfeG9G4VE4gS7B4q65KkHzdGsQsDwxQfR5J9uKB8poSGuNlHJWsF3ABqCkc5VliAR0m8KMjsQ9o/kOpEGnQ== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/client-common@4.24.0": version "4.24.0" @@ -100,20 +100,20 @@ "@algolia/requester-common" "4.24.0" "@algolia/transporter" "4.24.0" -"@algolia/client-common@5.15.0": - version 
"5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-common/-/client-common-5.15.0.tgz#cd47ae07a3afc7065438a2dab29f8434f848928e" - integrity sha512-IofrVh213VLsDkPoSKMeM9Dshrv28jhDlBDLRcVJQvlL8pzue7PEB1EZ4UoJFYS3NSn7JOcJ/V+olRQzXlJj1w== +"@algolia/client-common@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-common/-/client-common-5.21.0.tgz#f32c28d25ccaf2954aca5ae5954a810fdef5b85e" + integrity sha512-iHLgDQFyZNe9M16vipbx6FGOA8NoMswHrfom/QlCGoyh7ntjGvfMb+J2Ss8rRsAlOWluv8h923Ku3QVaB0oWDQ== -"@algolia/client-insights@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-insights/-/client-insights-5.15.0.tgz#f3bead0edd10e69365895da4a96044064b504f4d" - integrity sha512-bDDEQGfFidDi0UQUCbxXOCdphbVAgbVmxvaV75cypBTQkJ+ABx/Npw7LkFGw1FsoVrttlrrQbwjvUB6mLVKs/w== +"@algolia/client-insights@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-insights/-/client-insights-5.21.0.tgz#971c76f795923c1210f89c830d43bc14fa76de61" + integrity sha512-y7XBO9Iwb75FLDl95AYcWSLIViJTpR5SUUCyKsYhpP9DgyUqWbISqDLXc96TS9shj+H+7VsTKA9cJK8NUfVN6g== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/client-personalization@4.24.0": version "4.24.0" @@ -124,25 +124,25 @@ "@algolia/requester-common" "4.24.0" "@algolia/transporter" "4.24.0" -"@algolia/client-personalization@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-5.15.0.tgz#e962793ebf737a5ffa4867d2dfdfe17924be3833" - integrity sha512-LfaZqLUWxdYFq44QrasCDED5bSYOswpQjSiIL7Q5fYlefAAUO95PzBPKCfUhSwhb4rKxigHfDkd81AvEicIEoA== +"@algolia/client-personalization@5.21.0": + 
version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-5.21.0.tgz#0ab7c370a115d0b83edd8db55a4ea2f2b9212190" + integrity sha512-6KU658lD9Tss4oCX6c/O15tNZxw7vR+WAUG95YtZzYG/KGJHTpy2uckqbMmC2cEK4a86FAq4pH5azSJ7cGMjuw== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" -"@algolia/client-query-suggestions@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-query-suggestions/-/client-query-suggestions-5.15.0.tgz#d9a2d0d0660241bdae5fc36a6f1fcf339abbafeb" - integrity sha512-wu8GVluiZ5+il8WIRsGKu8VxMK9dAlr225h878GGtpTL6VBvwyJvAyLdZsfFIpY0iN++jiNb31q2C1PlPL+n/A== +"@algolia/client-query-suggestions@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-query-suggestions/-/client-query-suggestions-5.21.0.tgz#14291a63db8ccd53e415d46578390fa5e1d1d35f" + integrity sha512-pG6MyVh1v0X+uwrKHn3U+suHdgJ2C+gug+UGkNHfMELHMsEoWIAQhxMBOFg7hCnWBFjQnuq6qhM3X9X5QO3d9Q== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/client-search@4.24.0": version "4.24.0" @@ -153,30 +153,30 @@ "@algolia/requester-common" "4.24.0" "@algolia/transporter" "4.24.0" -"@algolia/client-search@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-5.15.0.tgz#8645f5bc87a959b8008e021d8b31d55a47920b94" - integrity 
sha512-Z32gEMrRRpEta5UqVQA612sLdoqY3AovvUPClDfMxYrbdDAebmGDVPtSogUba1FZ4pP5dx20D3OV3reogLKsRA== +"@algolia/client-search@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-5.21.0.tgz#37807d286a18e59b32af06dc62d4bd853d50121c" + integrity sha512-nZfgJH4njBK98tFCmCW1VX/ExH4bNOl9DSboxeXGgvhoL0fG1+4DDr/mrLe21OggVCQqHwXBMh6fFInvBeyhiQ== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/events@^4.0.1": version "4.0.1" resolved "https://registry.yarnpkg.com/@algolia/events/-/events-4.0.1.tgz#fd39e7477e7bc703d7f893b556f676c032af3950" integrity sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ== -"@algolia/ingestion@1.15.0": - version "1.15.0" - resolved "https://registry.yarnpkg.com/@algolia/ingestion/-/ingestion-1.15.0.tgz#a3f3ec2139042f8597c2a975430a6f77cd764db3" - integrity sha512-MkqkAxBQxtQ5if/EX2IPqFA7LothghVyvPoRNA/meS2AW2qkHwcxjuiBxv4H6mnAVEPfJlhu9rkdVz9LgCBgJg== +"@algolia/ingestion@1.21.0": + version "1.21.0" + resolved "https://registry.yarnpkg.com/@algolia/ingestion/-/ingestion-1.21.0.tgz#7524dcc848abc44656508ea0951cceaf18e3f51b" + integrity sha512-k6MZxLbZphGN5uRri9J/krQQBjUrqNcScPh985XXEFXbSCRvOPKVtjjLdVjGVHXXPOQgKrIZHxIdRNbHS+wVuA== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/logger-common@4.24.0": version "4.24.0" @@ -190,15 +190,15 @@ dependencies: "@algolia/logger-common" 
"4.24.0" -"@algolia/monitoring@1.15.0": - version "1.15.0" - resolved "https://registry.yarnpkg.com/@algolia/monitoring/-/monitoring-1.15.0.tgz#1eb58722ec9ea6e5de3621150f97a43571bd312e" - integrity sha512-QPrFnnGLMMdRa8t/4bs7XilPYnoUXDY8PMQJ1sf9ZFwhUysYYhQNX34/enoO0LBjpoOY6rLpha39YQEFbzgKyQ== +"@algolia/monitoring@1.21.0": + version "1.21.0" + resolved "https://registry.yarnpkg.com/@algolia/monitoring/-/monitoring-1.21.0.tgz#9daab7fe728b44ae998c2425d12e4bd77efe07f5" + integrity sha512-FiW5nnmyHvaGdorqLClw3PM6keXexAMiwbwJ9xzQr4LcNefLG3ln82NafRPgJO/z0dETAOKjds5aSmEFMiITHQ== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/recommend@4.24.0": version "4.24.0" @@ -217,15 +217,15 @@ "@algolia/requester-node-http" "4.24.0" "@algolia/transporter" "4.24.0" -"@algolia/recommend@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/recommend/-/recommend-5.15.0.tgz#8f3359ee7e855849ac3872f67c0672f6835c8f79" - integrity sha512-5eupMwSqMLDObgSMF0XG958zR6GJP3f7jHDQ3/WlzCM9/YIJiWIUoJFGsko9GYsA5xbLDHE/PhWtq4chcCdaGQ== +"@algolia/recommend@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/recommend/-/recommend-5.21.0.tgz#4c9a2e90bab87c9d63f8eebaf56c12e4f9e517c0" + integrity sha512-+JXavbbliaLmah5QNgc/TDW/+r0ALa+rGhg5Y7+pF6GpNnzO0L+nlUaDNE8QbiJfz54F9BkwFUnJJeRJAuzTFw== dependencies: - "@algolia/client-common" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" + "@algolia/client-common" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" "@algolia/requester-browser-xhr@4.24.0": version 
"4.24.0" @@ -234,24 +234,24 @@ dependencies: "@algolia/requester-common" "4.24.0" -"@algolia/requester-browser-xhr@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.15.0.tgz#5ffdccdf5cd7814ed3486bed418edb6db25c32a2" - integrity sha512-Po/GNib6QKruC3XE+WKP1HwVSfCDaZcXu48kD+gwmtDlqHWKc7Bq9lrS0sNZ456rfCKhXksOmMfUs4wRM/Y96w== +"@algolia/requester-browser-xhr@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.21.0.tgz#7840e52a45fd8a7b58340470c4700492d32fdf7d" + integrity sha512-Iw+Yj5hOmo/iixHS94vEAQ3zi5GPpJywhfxn1el/zWo4AvPIte/+1h9Ywgw/+3M7YBj4jgAkScxjxQCxzLBsjA== dependencies: - "@algolia/client-common" "5.15.0" + "@algolia/client-common" "5.21.0" "@algolia/requester-common@4.24.0": version "4.24.0" resolved "https://registry.yarnpkg.com/@algolia/requester-common/-/requester-common-4.24.0.tgz#1c60c198031f48fcdb9e34c4057a3ea987b9a436" integrity sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA== -"@algolia/requester-fetch@5.15.0": - version "5.15.0" - resolved "https://registry.yarnpkg.com/@algolia/requester-fetch/-/requester-fetch-5.15.0.tgz#2ce94d4855090fac192b208d95eeea22e1ca4489" - integrity sha512-rOZ+c0P7ajmccAvpeeNrUmEKoliYFL8aOR5qGW5pFq3oj3Iept7Y5mEtEsOBYsRt6qLnaXn4zUKf+N8nvJpcIw== +"@algolia/requester-fetch@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/requester-fetch/-/requester-fetch-5.21.0.tgz#8c4caf767995aaf24c8fc5f873e9075df98fbf44" + integrity sha512-Z00SRLlIFj3SjYVfsd9Yd3kB3dUwQFAkQG18NunWP7cix2ezXpJqA+xAoEf9vc4QZHdxU3Gm8gHAtRiM2iVaTQ== dependencies: - "@algolia/client-common" "5.15.0" + "@algolia/client-common" "5.21.0" "@algolia/requester-node-http@4.24.0": version "4.24.0" @@ -260,12 +260,12 @@ dependencies: "@algolia/requester-common" "4.24.0" -"@algolia/requester-node-http@5.15.0": - version "5.15.0" - resolved 
"https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-5.15.0.tgz#e2020afcdaea56dc204bc6c82daab41478b32d87" - integrity sha512-b1jTpbFf9LnQHEJP5ddDJKE2sAlhYd7EVSOWgzo/27n/SfCoHfqD0VWntnWYD83PnOKvfe8auZ2+xCb0TXotrQ== +"@algolia/requester-node-http@5.21.0": + version "5.21.0" + resolved "https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-5.21.0.tgz#c1a8cd0f33e375c147bc5efda73f9677a47416c9" + integrity sha512-WqU0VumUILrIeVYCTGZlyyZoC/tbvhiyPxfGRRO1cSjxN558bnJLlR2BvS0SJ5b75dRNK7HDvtXo2QoP9eLfiA== dependencies: - "@algolia/client-common" "5.15.0" + "@algolia/client-common" "5.21.0" "@algolia/transporter@4.24.0": version "4.24.0" @@ -284,7 +284,7 @@ "@jridgewell/gen-mapping" "^0.3.5" "@jridgewell/trace-mapping" "^0.3.24" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.25.9", "@babel/code-frame@^7.26.0", "@babel/code-frame@^7.26.2", "@babel/code-frame@^7.8.3": +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.26.2", "@babel/code-frame@^7.8.3": version "7.26.2" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== @@ -293,38 +293,12 @@ js-tokens "^4.0.0" picocolors "^1.0.0" -"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.25.9", "@babel/compat-data@^7.26.0": - version "7.26.2" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.2.tgz#278b6b13664557de95b8f35b90d96785850bb56e" - integrity sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg== - -"@babel/compat-data@^7.26.5", "@babel/compat-data@^7.26.8": +"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.26.5", "@babel/compat-data@^7.26.8": version "7.26.8" resolved 
"https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.8.tgz#821c1d35641c355284d4a870b8a4a7b0c141e367" integrity sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ== -"@babel/core@^7.21.3", "@babel/core@^7.23.3": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.0.tgz#d78b6023cc8f3114ccf049eb219613f74a747b40" - integrity sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg== - dependencies: - "@ampproject/remapping" "^2.2.0" - "@babel/code-frame" "^7.26.0" - "@babel/generator" "^7.26.0" - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-module-transforms" "^7.26.0" - "@babel/helpers" "^7.26.0" - "@babel/parser" "^7.26.0" - "@babel/template" "^7.25.9" - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.26.0" - convert-source-map "^2.0.0" - debug "^4.1.0" - gensync "^1.0.0-beta.2" - json5 "^2.2.3" - semver "^6.3.1" - -"@babel/core@^7.25.9": +"@babel/core@^7.21.3", "@babel/core@^7.23.3", "@babel/core@^7.25.9": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.10.tgz#5c876f83c8c4dcb233ee4b670c0606f2ac3000f9" integrity sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ== @@ -345,18 +319,7 @@ json5 "^2.2.3" semver "^6.3.1" -"@babel/generator@^7.23.3", "@babel/generator@^7.25.9", "@babel/generator@^7.26.0": - version "7.26.2" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.2.tgz#87b75813bec87916210e5e01939a4c823d6bb74f" - integrity sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw== - dependencies: - "@babel/parser" "^7.26.2" - "@babel/types" "^7.26.0" - "@jridgewell/gen-mapping" "^0.3.5" - "@jridgewell/trace-mapping" "^0.3.25" - jsesc "^3.0.2" - -"@babel/generator@^7.26.10": +"@babel/generator@^7.23.3", "@babel/generator@^7.25.9", "@babel/generator@^7.26.10": version "7.26.10" resolved 
"https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.10.tgz#a60d9de49caca16744e6340c3658dfef6138c3f7" integrity sha512-rRHT8siFIXQrAYOYqZQVsAr8vJ+cBNqcVAY6m5V8/4QqzaPl+zDBe6cLEPRDuNOUf3ww8RfJVlOyQMoSI+5Ang== @@ -374,26 +337,7 @@ dependencies: "@babel/types" "^7.25.9" -"@babel/helper-builder-binary-assignment-operator-visitor@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.25.9.tgz#f41752fe772a578e67286e6779a68a5a92de1ee9" - integrity sha512-C47lC7LIDCnz0h4vai/tpNOI95tCd5ZT3iBt/DBH5lXKHZsyNQv18yf1wIIg2ntiQNgmAvA+DgZ82iW8Qdym8g== - dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz#55af025ce365be3cdc0c1c1e56c6af617ce88875" - integrity sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ== - dependencies: - "@babel/compat-data" "^7.25.9" - "@babel/helper-validator-option" "^7.25.9" - browserslist "^4.24.0" - lru-cache "^5.1.1" - semver "^6.3.1" - -"@babel/helper-compilation-targets@^7.26.5": +"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.25.9", "@babel/helper-compilation-targets@^7.26.5": version "7.26.5" resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz#75d92bb8d8d51301c0d49e52a65c9a7fe94514d8" integrity sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA== @@ -405,28 +349,28 @@ semver "^6.3.1" "@babel/helper-create-class-features-plugin@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.9.tgz#7644147706bb90ff613297d49ed5266bde729f83" - integrity sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ== + version "7.26.9" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.26.9.tgz#d6f83e3039547fbb39967e78043cd3c8b7820c71" + integrity sha512-ubbUqCofvxPRurw5L8WTsCLSkQiVpov4Qx0WMA+jUN+nXBK8ADPlJO1grkFw5CWKC5+sZSOfuGMdX1aI1iT9Sg== dependencies: "@babel/helper-annotate-as-pure" "^7.25.9" "@babel/helper-member-expression-to-functions" "^7.25.9" "@babel/helper-optimise-call-expression" "^7.25.9" - "@babel/helper-replace-supers" "^7.25.9" + "@babel/helper-replace-supers" "^7.26.5" "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/traverse" "^7.26.9" semver "^6.3.1" "@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.25.9.tgz#3e8999db94728ad2b2458d7a470e7770b7764e26" - integrity sha512-ORPNZ3h6ZRkOyAa/SaHU+XsLZr0UQzRwuDQ0cczIA17nAzZ+85G5cVkOJIj7QavLZGSe8QXUmNFxSZzjcZF9bw== + version "7.26.3" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.26.3.tgz#5169756ecbe1d95f7866b90bb555b022595302a0" + integrity sha512-G7ZRb40uUgdKOQqPLjfD12ZmGA54PzqDFUv2BKImnC9QIfGhIHKvVML0oN8IUiDq4iRqpq74ABpvOaerfWdong== dependencies: "@babel/helper-annotate-as-pure" "^7.25.9" - regexpu-core "^6.1.1" + regexpu-core "^6.2.0" semver "^6.3.1" -"@babel/helper-define-polyfill-provider@^0.6.2", "@babel/helper-define-polyfill-provider@^0.6.3": +"@babel/helper-define-polyfill-provider@^0.6.3": version "0.6.3" resolved 
"https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.3.tgz#f4f2792fae2ef382074bc2d713522cf24e6ddb21" integrity sha512-HK7Bi+Hj6H+VTHA3ZvBis7V/6hu9QuTrnMXNybfUf2iiuU/N97I8VjB+KbhFF8Rld/Lx5MzoCwPCpPjfK+n8Cg== @@ -469,12 +413,7 @@ dependencies: "@babel/types" "^7.25.9" -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.25.9", "@babel/helper-plugin-utils@^7.8.0": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz#9cbdd63a9443a2c92a725cca7ebca12cc8dd9f46" - integrity sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw== - -"@babel/helper-plugin-utils@^7.26.5": +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.25.9", "@babel/helper-plugin-utils@^7.26.5", "@babel/helper-plugin-utils@^7.8.0": version "7.26.5" resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz#18580d00c9934117ad719392c4f6585c9333cc35" integrity sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg== @@ -488,22 +427,14 @@ "@babel/helper-wrap-function" "^7.25.9" "@babel/traverse" "^7.25.9" -"@babel/helper-replace-supers@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.25.9.tgz#ba447224798c3da3f8713fc272b145e33da6a5c5" - integrity sha512-IiDqTOTBQy0sWyeXyGSC5TBJpGFXBkRynjBeXsvbhQFKj2viwJC76Epz35YLU1fpe/Am6Vppb7W7zM4fPQzLsQ== +"@babel/helper-replace-supers@^7.25.9", "@babel/helper-replace-supers@^7.26.5": + version "7.26.5" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.26.5.tgz#6cb04e82ae291dae8e72335dfe438b0725f14c8d" + integrity 
sha512-bJ6iIVdYX1YooY2X7w1q6VITt+LnUILtNk7zT78ykuwStx8BauCzxvFqFaHjOpW1bVnSUM1PN1f0p5P21wHxvg== dependencies: "@babel/helper-member-expression-to-functions" "^7.25.9" "@babel/helper-optimise-call-expression" "^7.25.9" - "@babel/traverse" "^7.25.9" - -"@babel/helper-simple-access@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.25.9.tgz#6d51783299884a2c74618d6ef0f86820ec2e7739" - integrity sha512-c6WHXuiaRsJTyHYLJV75t9IqsmTbItYfdj99PnzYGQZkYKvan5/2jKJ7gu31J3/BJ/A18grImSPModuyG/Eo0Q== - dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" + "@babel/traverse" "^7.26.5" "@babel/helper-skip-transparent-expression-wrappers@^7.25.9": version "7.25.9" @@ -537,14 +468,6 @@ "@babel/traverse" "^7.25.9" "@babel/types" "^7.25.9" -"@babel/helpers@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.0.tgz#30e621f1eba5aa45fe6f4868d2e9154d884119a4" - integrity sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw== - dependencies: - "@babel/template" "^7.25.9" - "@babel/types" "^7.26.0" - "@babel/helpers@^7.26.10": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.10.tgz#6baea3cd62ec2d0c1068778d63cb1314f6637384" @@ -553,13 +476,6 @@ "@babel/template" "^7.26.9" "@babel/types" "^7.26.10" -"@babel/parser@^7.25.9", "@babel/parser@^7.26.0", "@babel/parser@^7.26.2": - version "7.26.2" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.2.tgz#fd7b6f487cfea09889557ef5d4eeb9ff9a5abd11" - integrity sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ== - dependencies: - "@babel/types" "^7.26.0" - "@babel/parser@^7.26.10", "@babel/parser@^7.26.9": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.10.tgz#e9bdb82f14b97df6569b0b038edd436839c57749" @@ -661,15 +577,6 @@ dependencies: 
"@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-async-generator-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.9.tgz#1b18530b077d18a407c494eb3d1d72da505283a2" - integrity sha512-RXV6QAzTBbhDMO9fWwOmwwTuYaiPbggWQ9INdZqAYeSHyG7FzQ+nOZaUUjNwKv9pV3aE4WFqFm1Hnbci5tBCAw== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-remap-async-to-generator" "^7.25.9" - "@babel/traverse" "^7.25.9" - "@babel/plugin-transform-async-generator-functions@^7.26.8": version "7.26.8" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.26.8.tgz#5e3991135e3b9c6eaaf5eff56d1ae5a11df45ff8" @@ -688,13 +595,6 @@ "@babel/helper-plugin-utils" "^7.25.9" "@babel/helper-remap-async-to-generator" "^7.25.9" -"@babel/plugin-transform-block-scoped-functions@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.25.9.tgz#5700691dbd7abb93de300ca7be94203764fce458" - integrity sha512-toHc9fzab0ZfenFpsyYinOX0J/5dgJVA2fm64xPewu7CoYHWEivIWKxkK2rMi4r3yQqLnVmheMXRdG+k239CgA== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-block-scoped-functions@^7.26.5": version "7.26.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.26.5.tgz#3dc4405d31ad1cbe45293aa57205a6e3b009d53e" @@ -782,14 +682,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-exponentiation-operator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.25.9.tgz#ece47b70d236c1d99c263a1e22b62dc20a4c8b0f" - integrity 
sha512-KRhdhlVk2nObA5AYa7QMgTMTVJdfHprfpAk4DjZVtllqRg9qarilstTKEhpVjyt+Npi8ThRyiV8176Am3CodPA== - dependencies: - "@babel/helper-builder-binary-assignment-operator-visitor" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-exponentiation-operator@^7.26.3": version "7.26.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.26.3.tgz#e29f01b6de302c7c2c794277a48f04a9ca7f03bc" @@ -804,14 +696,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-for-of@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.25.9.tgz#4bdc7d42a213397905d89f02350c5267866d5755" - integrity sha512-LqHxduHoaGELJl2uhImHwRQudhCM50pT46rIBNvtT/Oql3nqiS3wOwP+5ten7NpYSXrrVLgtZU3DZmPtWZo16A== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" - "@babel/plugin-transform-for-of@^7.26.9": version "7.26.9" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.26.9.tgz#27231f79d5170ef33b5111f07fe5cafeb2c96a56" @@ -865,16 +749,7 @@ "@babel/helper-module-transforms" "^7.25.9" "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-modules-commonjs@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.25.9.tgz#d165c8c569a080baf5467bda88df6425fc060686" - integrity sha512-dwh2Ol1jWwL2MgkCzUSOvfmKElqQcuswAZypBSUsScMXvgdT8Ekq5YA6TtqpTVWH+4903NmboMuH1o9i8Rxlyg== - dependencies: - "@babel/helper-module-transforms" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-simple-access" "^7.25.9" - -"@babel/plugin-transform-modules-commonjs@^7.26.3": +"@babel/plugin-transform-modules-commonjs@^7.25.9", "@babel/plugin-transform-modules-commonjs@^7.26.3": version "7.26.3" resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.26.3.tgz#8f011d44b20d02c3de44d8850d971d8497f981fb" integrity sha512-MgR55l4q9KddUDITEzEFYn5ZsGDXMSsU9E+kh7fjRXTIC3RHqfCo8RPRbyReYJh44HQ/yomFkqbOFohXvDCiIQ== @@ -915,13 +790,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-nullish-coalescing-operator@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.25.9.tgz#bcb1b0d9e948168102d5f7104375ca21c3266949" - integrity sha512-ENfftpLZw5EItALAD4WsY/KUWvhUlZndm5GC7G3evUsVeSJB6p0pBeLQUnRnBCBx7zV0RKQjR9kCuwrsIrjWog== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-nullish-coalescing-operator@^7.26.6": version "7.26.6" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.26.6.tgz#fbf6b3c92cb509e7b319ee46e3da89c5bedd31fe" @@ -1062,19 +930,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-runtime@^7.22.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.25.9.tgz#62723ea3f5b31ffbe676da9d6dae17138ae580ea" - integrity sha512-nZp7GlEl+yULJrClz0SwHPqir3lc0zsPrDHQUcxGspSL7AKrexNSEfTbfqnDNJUO13bgKyfuOLMF8Xqtu8j3YQ== - dependencies: - "@babel/helper-module-imports" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - babel-plugin-polyfill-corejs2 "^0.4.10" - babel-plugin-polyfill-corejs3 "^0.10.6" - babel-plugin-polyfill-regenerator "^0.6.1" - semver "^6.3.1" - -"@babel/plugin-transform-runtime@^7.25.9": +"@babel/plugin-transform-runtime@^7.22.9", "@babel/plugin-transform-runtime@^7.25.9": version "7.26.10" resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.26.10.tgz#6b4504233de8238e7d666c15cde681dc62adff87" integrity sha512-NWaL2qG6HRpONTnj4JvDU6th4jYeZOJgu3QhmFTCihib0ermtOJqktA5BduGm3suhhVe9EMP9c9+mfJ/I9slqw== @@ -1108,13 +964,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-template-literals@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.25.9.tgz#6dbd4a24e8fad024df76d1fac6a03cf413f60fe1" - integrity sha512-o97AE4syN71M/lxrCtQByzphAdlYluKPDBzDVzMmfCobUjjhAryZV0AIpRPrxN0eAkxXO6ZLEScmt+PNhj2OTw== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-template-literals@^7.26.8": version "7.26.8" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.26.8.tgz#966b15d153a991172a540a69ad5e1845ced990b5" @@ -1122,13 +971,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.26.5" -"@babel/plugin-transform-typeof-symbol@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.25.9.tgz#224ba48a92869ddbf81f9b4a5f1204bbf5a2bc4b" - integrity sha512-v61XqUMiueJROUv66BVIOi0Fv/CUuZuZMl5NkRoCVxLAnMexZ0A3kMe7vvZ0nulxMuMp0Mk6S5hNh48yki08ZA== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/plugin-transform-typeof-symbol@^7.26.7": version "7.26.7" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.26.7.tgz#d0e33acd9223744c1e857dbd6fa17bd0a3786937" @@ -1137,13 +979,13 @@ "@babel/helper-plugin-utils" "^7.26.5" "@babel/plugin-transform-typescript@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.25.9.tgz#69267905c2b33c2ac6d8fe765e9dc2ddc9df3849" - integrity 
sha512-7PbZQZP50tzv2KGGnhh82GSyMB01yKY9scIjf1a+GfZCtInOWqUH5+1EBU4t9fyR5Oykkkc9vFTs4OHrhHXljQ== + version "7.26.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.26.8.tgz#2e9caa870aa102f50d7125240d9dbf91334b0950" + integrity sha512-bME5J9AC8ChwA7aEPJ6zym3w7aObZULHhbNLU0bKUhKsAkylkzUdq+0kdymh9rzi8nlNFl2bmldFBCKNJBUpuw== dependencies: "@babel/helper-annotate-as-pure" "^7.25.9" "@babel/helper-create-class-features-plugin" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.26.5" "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" "@babel/plugin-syntax-typescript" "^7.25.9" @@ -1178,82 +1020,7 @@ "@babel/helper-create-regexp-features-plugin" "^7.25.9" "@babel/helper-plugin-utils" "^7.25.9" -"@babel/preset-env@^7.20.2", "@babel/preset-env@^7.22.9": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.26.0.tgz#30e5c6bc1bcc54865bff0c5a30f6d4ccdc7fa8b1" - integrity sha512-H84Fxq0CQJNdPFT2DrfnylZ3cf5K43rGfWK4LJGPpjKHiZlk0/RzwEus3PDDZZg+/Er7lCA03MVacueUuXdzfw== - dependencies: - "@babel/compat-data" "^7.26.0" - "@babel/helper-compilation-targets" "^7.25.9" - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-validator-option" "^7.25.9" - "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.25.9" - "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.25.9" - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.25.9" - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.25.9" - "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.25.9" - "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2" - "@babel/plugin-syntax-import-assertions" "^7.26.0" - "@babel/plugin-syntax-import-attributes" "^7.26.0" - "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6" - "@babel/plugin-transform-arrow-functions" "^7.25.9" - 
"@babel/plugin-transform-async-generator-functions" "^7.25.9" - "@babel/plugin-transform-async-to-generator" "^7.25.9" - "@babel/plugin-transform-block-scoped-functions" "^7.25.9" - "@babel/plugin-transform-block-scoping" "^7.25.9" - "@babel/plugin-transform-class-properties" "^7.25.9" - "@babel/plugin-transform-class-static-block" "^7.26.0" - "@babel/plugin-transform-classes" "^7.25.9" - "@babel/plugin-transform-computed-properties" "^7.25.9" - "@babel/plugin-transform-destructuring" "^7.25.9" - "@babel/plugin-transform-dotall-regex" "^7.25.9" - "@babel/plugin-transform-duplicate-keys" "^7.25.9" - "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.25.9" - "@babel/plugin-transform-dynamic-import" "^7.25.9" - "@babel/plugin-transform-exponentiation-operator" "^7.25.9" - "@babel/plugin-transform-export-namespace-from" "^7.25.9" - "@babel/plugin-transform-for-of" "^7.25.9" - "@babel/plugin-transform-function-name" "^7.25.9" - "@babel/plugin-transform-json-strings" "^7.25.9" - "@babel/plugin-transform-literals" "^7.25.9" - "@babel/plugin-transform-logical-assignment-operators" "^7.25.9" - "@babel/plugin-transform-member-expression-literals" "^7.25.9" - "@babel/plugin-transform-modules-amd" "^7.25.9" - "@babel/plugin-transform-modules-commonjs" "^7.25.9" - "@babel/plugin-transform-modules-systemjs" "^7.25.9" - "@babel/plugin-transform-modules-umd" "^7.25.9" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.25.9" - "@babel/plugin-transform-new-target" "^7.25.9" - "@babel/plugin-transform-nullish-coalescing-operator" "^7.25.9" - "@babel/plugin-transform-numeric-separator" "^7.25.9" - "@babel/plugin-transform-object-rest-spread" "^7.25.9" - "@babel/plugin-transform-object-super" "^7.25.9" - "@babel/plugin-transform-optional-catch-binding" "^7.25.9" - "@babel/plugin-transform-optional-chaining" "^7.25.9" - "@babel/plugin-transform-parameters" "^7.25.9" - "@babel/plugin-transform-private-methods" "^7.25.9" - 
"@babel/plugin-transform-private-property-in-object" "^7.25.9" - "@babel/plugin-transform-property-literals" "^7.25.9" - "@babel/plugin-transform-regenerator" "^7.25.9" - "@babel/plugin-transform-regexp-modifiers" "^7.26.0" - "@babel/plugin-transform-reserved-words" "^7.25.9" - "@babel/plugin-transform-shorthand-properties" "^7.25.9" - "@babel/plugin-transform-spread" "^7.25.9" - "@babel/plugin-transform-sticky-regex" "^7.25.9" - "@babel/plugin-transform-template-literals" "^7.25.9" - "@babel/plugin-transform-typeof-symbol" "^7.25.9" - "@babel/plugin-transform-unicode-escapes" "^7.25.9" - "@babel/plugin-transform-unicode-property-regex" "^7.25.9" - "@babel/plugin-transform-unicode-regex" "^7.25.9" - "@babel/plugin-transform-unicode-sets-regex" "^7.25.9" - "@babel/preset-modules" "0.1.6-no-external-plugins" - babel-plugin-polyfill-corejs2 "^0.4.10" - babel-plugin-polyfill-corejs3 "^0.10.6" - babel-plugin-polyfill-regenerator "^0.6.1" - core-js-compat "^3.38.1" - semver "^6.3.1" - -"@babel/preset-env@^7.25.9": +"@babel/preset-env@^7.20.2", "@babel/preset-env@^7.22.9", "@babel/preset-env@^7.25.9": version "7.26.9" resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.26.9.tgz#2ec64e903d0efe743699f77a10bdf7955c2123c3" integrity sha512-vX3qPGE8sEKEAZCWk05k3cpTAE3/nOYca++JA+Rd0z2NCNzabmYvEiSShKzm10zdquOIAVXsy2Ei/DTW34KlKQ== @@ -1337,19 +1104,7 @@ "@babel/types" "^7.4.4" esutils "^2.0.2" -"@babel/preset-react@^7.18.6", "@babel/preset-react@^7.22.5": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.25.9.tgz#5f473035dc2094bcfdbc7392d0766bd42dce173e" - integrity sha512-D3to0uSPiWE7rBrdIICCd0tJSIGpLaaGptna2+w7Pft5xMqLpA1sz99DK5TZ1TjGbdQ/VI1eCSZ06dv3lT4JOw== - dependencies: - "@babel/helper-plugin-utils" "^7.25.9" - "@babel/helper-validator-option" "^7.25.9" - "@babel/plugin-transform-react-display-name" "^7.25.9" - "@babel/plugin-transform-react-jsx" "^7.25.9" - "@babel/plugin-transform-react-jsx-development" 
"^7.25.9" - "@babel/plugin-transform-react-pure-annotations" "^7.25.9" - -"@babel/preset-react@^7.25.9": +"@babel/preset-react@^7.18.6", "@babel/preset-react@^7.22.5", "@babel/preset-react@^7.25.9": version "7.26.3" resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.26.3.tgz#7c5e028d623b4683c1f83a0bd4713b9100560caa" integrity sha512-Nl03d6T9ky516DGK2YMxrTqvnpUW63TnJMOMonj+Zae0JiPC5BC9xPMSL6L8fiSpA5vP88qfygavVQvnLp+6Cw== @@ -1372,15 +1127,7 @@ "@babel/plugin-transform-modules-commonjs" "^7.25.9" "@babel/plugin-transform-typescript" "^7.25.9" -"@babel/runtime-corejs3@^7.22.6": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.26.0.tgz#5af6bed16073eb4a0191233d61e158a5c768c430" - integrity sha512-YXHu5lN8kJCb1LOb9PgV6pvak43X2h4HvRApcN5SdWeaItQOzfn1hgP6jasD6KWQyJDBxrVmA9o9OivlnNJK/w== - dependencies: - core-js-pure "^3.30.2" - regenerator-runtime "^0.14.0" - -"@babel/runtime-corejs3@^7.25.9": +"@babel/runtime-corejs3@^7.22.6", "@babel/runtime-corejs3@^7.25.9": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.26.10.tgz#5a3185ca2813f8de8ae68622572086edf5cf51f2" integrity sha512-uITFQYO68pMEYR46AHgQoyBg7KPPJDAbGn4jUTIRgCFJIp88MIBUianVOplhZDEec07bp9zIyr4Kp0FCyQzmWg== @@ -1388,30 +1135,14 @@ core-js-pure "^3.30.2" regenerator-runtime "^0.14.0" -"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.18.3", "@babel/runtime@^7.22.6", "@babel/runtime@^7.26.0", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.4", "@babel/runtime@^7.8.7": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.0.tgz#8600c2f595f277c60815256418b85356a65173c1" - integrity sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw== - dependencies: - regenerator-runtime "^0.14.0" - -"@babel/runtime@^7.25.9": +"@babel/runtime@^7.1.2", 
"@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.18.3", "@babel/runtime@^7.22.6", "@babel/runtime@^7.25.9", "@babel/runtime@^7.26.0", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.4", "@babel/runtime@^7.8.7": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.10.tgz#a07b4d8fa27af131a633d7b3524db803eb4764c2" integrity sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw== dependencies: regenerator-runtime "^0.14.0" -"@babel/template@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.9.tgz#ecb62d81a8a6f5dc5fe8abfc3901fc52ddf15016" - integrity sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg== - dependencies: - "@babel/code-frame" "^7.25.9" - "@babel/parser" "^7.25.9" - "@babel/types" "^7.25.9" - -"@babel/template@^7.26.9": +"@babel/template@^7.25.9", "@babel/template@^7.26.9": version "7.26.9" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.26.9.tgz#4577ad3ddf43d194528cff4e1fa6b232fa609bb2" integrity sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA== @@ -1420,20 +1151,7 @@ "@babel/parser" "^7.26.9" "@babel/types" "^7.26.9" -"@babel/traverse@^7.22.8", "@babel/traverse@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.25.9.tgz#a50f8fe49e7f69f53de5bea7e413cd35c5e13c84" - integrity sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw== - dependencies: - "@babel/code-frame" "^7.25.9" - "@babel/generator" "^7.25.9" - "@babel/parser" "^7.25.9" - "@babel/template" "^7.25.9" - "@babel/types" "^7.25.9" - debug "^4.3.1" - globals "^11.1.0" - -"@babel/traverse@^7.26.10", "@babel/traverse@^7.26.8": +"@babel/traverse@^7.22.8", "@babel/traverse@^7.25.9", "@babel/traverse@^7.26.10", "@babel/traverse@^7.26.5", 
"@babel/traverse@^7.26.8", "@babel/traverse@^7.26.9": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.26.10.tgz#43cca33d76005dbaa93024fae536cc1946a4c380" integrity sha512-k8NuDrxr0WrPH5Aupqb2LCVURP/S0vBEn5mK6iH+GIYob66U5EtoZvcdudR2jQ4cmTwhEwW1DLB+Yyas9zjF6A== @@ -1446,15 +1164,7 @@ debug "^4.3.1" globals "^11.1.0" -"@babel/types@^7.21.3", "@babel/types@^7.25.9", "@babel/types@^7.26.0", "@babel/types@^7.4.4": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.0.tgz#deabd08d6b753bc8e0f198f8709fb575e31774ff" - integrity sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA== - dependencies: - "@babel/helper-string-parser" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" - -"@babel/types@^7.26.10", "@babel/types@^7.26.9": +"@babel/types@^7.21.3", "@babel/types@^7.25.9", "@babel/types@^7.26.10", "@babel/types@^7.26.9", "@babel/types@^7.4.4": version "7.26.10" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.10.tgz#396382f6335bd4feb65741eacfc808218f859259" integrity sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ== @@ -1794,20 +1504,20 @@ resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz#1d572bfbbe14b7704e0ba0f39b74815b84870d70" integrity sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw== -"@docsearch/css@3.8.0": - version "3.8.0" - resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.8.0.tgz#c70a1a326249d878ab7c630d7a908c6769a38db3" - integrity sha512-pieeipSOW4sQ0+bE5UFC51AOZp9NGxg89wAlZ1BAQFaiRAGK1IKUaPQ0UGZeNctJXyqZ1UvBtOQh2HH+U5GtmA== +"@docsearch/css@3.9.0": + version "3.9.0" + resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.9.0.tgz#3bc29c96bf024350d73b0cfb7c2a7b71bf251cd5" + integrity sha512-cQbnVbq0rrBwNAKegIac/t6a8nWoUAn8frnkLFW6YARaRmAQr5/Eoe6Ln2fqkUCZ40KpdrKbpSAmgrkviOxuWA== 
"@docsearch/react@^3.5.2": - version "3.8.0" - resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.8.0.tgz#c32165e34fadea8a0283c8b61cd73e6e1844797d" - integrity sha512-WnFK720+iwTVt94CxY3u+FgX6exb3BfN5kE9xUY6uuAH/9W/UFboBZFLlrw/zxFRHoHZCOXRtOylsXF+6LHI+Q== + version "3.9.0" + resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.9.0.tgz#d0842b700c3ee26696786f3c8ae9f10c1a3f0db3" + integrity sha512-mb5FOZYZIkRQ6s/NWnM98k879vu5pscWqTLubLFBO87igYYT4VzVazh4h5o/zCvTIZgEt3PvsCOMOswOUo9yHQ== dependencies: - "@algolia/autocomplete-core" "1.17.7" - "@algolia/autocomplete-preset-algolia" "1.17.7" - "@docsearch/css" "3.8.0" - algoliasearch "^5.12.0" + "@algolia/autocomplete-core" "1.17.9" + "@algolia/autocomplete-preset-algolia" "1.17.9" + "@docsearch/css" "3.9.0" + algoliasearch "^5.14.2" "@docusaurus/babel@3.7.0": version "3.7.0" @@ -2471,10 +2181,10 @@ source-map "^0.5.7" stylis "4.2.0" -"@emotion/cache@^11.13.1", "@emotion/cache@^11.13.5": - version "11.13.5" - resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.13.5.tgz#e78dad0489e1ed7572507ba8ed9d2130529e4266" - integrity sha512-Z3xbtJ+UcK76eWkagZ1onvn/wAVb1GOMuR15s30Fm2wrMgC7jzpnO2JZXr4eujTTqoQFUrZIw/rT0c6Zzjca1g== +"@emotion/cache@^11.13.5", "@emotion/cache@^11.14.0": + version "11.14.0" + resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.14.0.tgz#ee44b26986eeb93c8be82bb92f1f7a9b21b2ed76" + integrity sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA== dependencies: "@emotion/memoize" "^0.9.0" "@emotion/sheet" "^1.4.0" @@ -2500,20 +2210,20 @@ integrity sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ== "@emotion/react@^11.13.3": - version "11.13.5" - resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.13.5.tgz#fc818ff5b13424f86501ba4d0740f343ae20b8d9" - integrity sha512-6zeCUxUH+EPF1s+YF/2hPVODeV/7V07YU5x+2tfuRL8MdW6rv5vb2+CBEGTGwBdux0OIERcOS+RzxeK80k2DsQ== + version 
"11.14.0" + resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.14.0.tgz#cfaae35ebc67dd9ef4ea2e9acc6cd29e157dd05d" + integrity sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA== dependencies: "@babel/runtime" "^7.18.3" "@emotion/babel-plugin" "^11.13.5" - "@emotion/cache" "^11.13.5" + "@emotion/cache" "^11.14.0" "@emotion/serialize" "^1.3.3" - "@emotion/use-insertion-effect-with-fallbacks" "^1.1.0" + "@emotion/use-insertion-effect-with-fallbacks" "^1.2.0" "@emotion/utils" "^1.4.2" "@emotion/weak-memoize" "^0.4.0" hoist-non-react-statics "^3.3.1" -"@emotion/serialize@^1.3.2", "@emotion/serialize@^1.3.3": +"@emotion/serialize@^1.3.3": version "1.3.3" resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.3.3.tgz#d291531005f17d704d0463a032fe679f376509e8" integrity sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA== @@ -2530,15 +2240,15 @@ integrity sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg== "@emotion/styled@^11.13.0": - version "11.13.5" - resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.13.5.tgz#0fa6602227414c5e42cf267506e3c35bae655df9" - integrity sha512-gnOQ+nGLPvDXgIx119JqGalys64lhMdnNQA9TMxhDA4K0Hq5+++OE20Zs5GxiCV9r814xQ2K5WmtofSpHVW6BQ== + version "11.14.0" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.14.0.tgz#f47ca7219b1a295186d7661583376fcea95f0ff3" + integrity sha512-XxfOnXFffatap2IyCeJyNov3kiDQWoR08gPUQxvbL7fxKryGBKUZUkG6Hz48DZwVrJSVh9sJboyV1Ds4OW6SgA== dependencies: "@babel/runtime" "^7.18.3" "@emotion/babel-plugin" "^11.13.5" "@emotion/is-prop-valid" "^1.3.0" "@emotion/serialize" "^1.3.3" - "@emotion/use-insertion-effect-with-fallbacks" "^1.1.0" + "@emotion/use-insertion-effect-with-fallbacks" "^1.2.0" "@emotion/utils" "^1.4.2" "@emotion/unitless@^0.10.0": @@ -2546,10 +2256,10 @@ resolved 
"https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.10.0.tgz#2af2f7c7e5150f497bdabd848ce7b218a27cf745" integrity sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg== -"@emotion/use-insertion-effect-with-fallbacks@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.1.0.tgz#1a818a0b2c481efba0cf34e5ab1e0cb2dcb9dfaf" - integrity sha512-+wBOcIV5snwGgI2ya3u99D7/FJquOIniQT1IKyDsBmEgwvpxMNeS65Oib7OnE2d2aY+3BU4OiH+0Wchf8yk3Hw== +"@emotion/use-insertion-effect-with-fallbacks@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz#8a8cb77b590e09affb960f4ff1e9a89e532738bf" + integrity sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg== "@emotion/utils@^1.4.2": version "1.4.2" @@ -2562,19 +2272,19 @@ integrity sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg== "@floating-ui/core@^1.6.0": - version "1.6.8" - resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.6.8.tgz#aa43561be075815879305965020f492cdb43da12" - integrity sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA== + version "1.6.9" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.6.9.tgz#64d1da251433019dafa091de9b2886ff35ec14e6" + integrity sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw== dependencies: - "@floating-ui/utils" "^0.2.8" + "@floating-ui/utils" "^0.2.9" "@floating-ui/dom@^1.0.0": - version "1.6.12" - resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.6.12.tgz#6333dcb5a8ead3b2bf82f33d6bc410e95f54e556" - integrity sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w== + version "1.6.13" + resolved 
"https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.6.13.tgz#a8a938532aea27a95121ec16e667a7cbe8c59e34" + integrity sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w== dependencies: "@floating-ui/core" "^1.6.0" - "@floating-ui/utils" "^0.2.8" + "@floating-ui/utils" "^0.2.9" "@floating-ui/react-dom@^2.1.1": version "2.1.2" @@ -2583,10 +2293,10 @@ dependencies: "@floating-ui/dom" "^1.0.0" -"@floating-ui/utils@^0.2.8": - version "0.2.8" - resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.8.tgz#21a907684723bbbaa5f0974cf7730bd797eb8e62" - integrity sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig== +"@floating-ui/utils@^0.2.9": + version "0.2.9" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.9.tgz#50dea3616bc8191fb8e112283b49eaff03e78429" + integrity sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg== "@hapi/hoek@^9.0.0", "@hapi/hoek@^9.3.0": version "9.3.0" @@ -2620,9 +2330,9 @@ chalk "^4.0.0" "@jridgewell/gen-mapping@^0.3.5": - version "0.3.5" - resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36" - integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg== + version "0.3.8" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz#4f0e06362e01362f823d348f1872b08f666d8142" + integrity sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA== dependencies: "@jridgewell/set-array" "^1.2.1" "@jridgewell/sourcemap-codec" "^1.4.10" @@ -2651,7 +2361,7 @@ resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== 
-"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": +"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": version "0.3.25" resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== @@ -2701,113 +2411,113 @@ dependencies: "@types/mdx" "^2.0.0" -"@mui/base@5.0.0-beta.62": - version "5.0.0-beta.62" - resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-beta.62.tgz#d5cec750691f76f997b3678a2053d99464155caa" - integrity sha512-TzJLCNlrMkSU4bTCdTT+TVUiGx4sjZLhH673UV6YN+rNNP8wJpkWfRSvjDB5HcbH2T0lUamnz643ZnV+8IiMjw== +"@mui/base@5.0.0-beta.69": + version "5.0.0-beta.69" + resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-beta.69.tgz#fc3635818c6a9fb954b1ee00870109e7e266149d" + integrity sha512-r2YyGUXpZxj8rLAlbjp1x2BnMERTZ/dMqd9cClKj2OJ7ALAuiv/9X5E9eHfRc9o/dGRuLSMq/WTjREktJVjxVA== dependencies: "@babel/runtime" "^7.26.0" "@floating-ui/react-dom" "^2.1.1" - "@mui/types" "^7.2.19" - "@mui/utils" "^6.1.8" + "@mui/types" "^7.2.21" + "@mui/utils" "^6.4.1" "@popperjs/core" "^2.11.8" clsx "^2.1.1" prop-types "^15.8.1" -"@mui/core-downloads-tracker@^6.1.8": - version "6.1.8" - resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-6.1.8.tgz#4fccd4eb9960ffa17ebf0533a6e9cfd2787cba57" - integrity sha512-TGAvzwUg9hybDacwfIGFjI2bXYXrIqky+vMfaeay8rvT56/PNAlvIDUJ54kpT5KRc9AWAihOvtDI7/LJOThOmQ== +"@mui/core-downloads-tracker@^6.4.7": + version "6.4.7" + resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-6.4.7.tgz#df62091560024a6412b0f35dcd584f9dba70dced" + integrity sha512-XjJrKFNt9zAKvcnoIIBquXyFyhfrHYuttqMsoDS7lM7VwufYG4fAPw4kINjBFg++fqXM2BNAuWR9J7XVIuKIKg== 
"@mui/icons-material@^6.1.1": - version "6.1.8" - resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-6.1.8.tgz#696fe487c16a6caba5b10cf72c5954dce4d33401" - integrity sha512-6frsXcf1TcJKWevWwRup6V4L8lzI33cbHcAjT83YLgKw0vYRZKY0kjMI9fhrJZdRWXgFFgKKvEv3GjoxbqFF7A== + version "6.4.7" + resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-6.4.7.tgz#078406b61c7d17230b8633643dbb458f89e02059" + integrity sha512-Rk8cs9ufQoLBw582Rdqq7fnSXXZTqhYRbpe1Y5SAz9lJKZP3CIdrj0PfG8HJLGw1hrsHFN/rkkm70IDzhJsG1g== dependencies: "@babel/runtime" "^7.26.0" "@mui/lab@^6.0.0-beta.10": - version "6.0.0-beta.16" - resolved "https://registry.yarnpkg.com/@mui/lab/-/lab-6.0.0-beta.16.tgz#2f1a17a7c679d399611da586193bf291e103741f" - integrity sha512-YFeKREMMCiUhp4dGXd6Y/7N3BLepys9bM6xi4aF0WTZOvfl1ksDXPzuXPGiiiIuMgQFJeyN5iUnS1iPu3wH+kQ== + version "6.0.0-beta.30" + resolved "https://registry.yarnpkg.com/@mui/lab/-/lab-6.0.0-beta.30.tgz#650973b4d04965f18b3d3390e2dd90e772a4f461" + integrity sha512-ayDYkzTlkm5cnDGa10bvuFygX+2b9Hm1T4QZYMqV8+nSx3frKE0TLAbE7/qQ4vInOO5E4aOkHVBwZjyO+UbMTA== dependencies: "@babel/runtime" "^7.26.0" - "@mui/base" "5.0.0-beta.62" - "@mui/system" "^6.1.8" - "@mui/types" "^7.2.19" - "@mui/utils" "^6.1.8" + "@mui/base" "5.0.0-beta.69" + "@mui/system" "^6.4.7" + "@mui/types" "^7.2.21" + "@mui/utils" "^6.4.6" clsx "^2.1.1" prop-types "^15.8.1" "@mui/material@^6.1.1": - version "6.1.8" - resolved "https://registry.yarnpkg.com/@mui/material/-/material-6.1.8.tgz#67cc5796aaa3f81ac94783fd79c85c80062e4a99" - integrity sha512-QZdQFnXct+7NXIzHgT3qt+sQiO7HYGZU2vymP9Xl9tUMXEOA/S1mZMMb7+WGZrk5TzNlU/kP/85K0da5V1jXoQ== + version "6.4.7" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-6.4.7.tgz#887f1efe4a1c244ef7aeebb7d95a6f061f50b89b" + integrity sha512-K65StXUeGAtFJ4ikvHKtmDCO5Ab7g0FZUu2J5VpoKD+O6Y3CjLYzRi+TMlI3kaL4CL158+FccMoOd/eaddmeRQ== dependencies: "@babel/runtime" "^7.26.0" - "@mui/core-downloads-tracker" "^6.1.8" - 
"@mui/system" "^6.1.8" - "@mui/types" "^7.2.19" - "@mui/utils" "^6.1.8" + "@mui/core-downloads-tracker" "^6.4.7" + "@mui/system" "^6.4.7" + "@mui/types" "^7.2.21" + "@mui/utils" "^6.4.6" "@popperjs/core" "^2.11.8" - "@types/react-transition-group" "^4.4.11" + "@types/react-transition-group" "^4.4.12" clsx "^2.1.1" csstype "^3.1.3" prop-types "^15.8.1" - react-is "^18.3.1" + react-is "^19.0.0" react-transition-group "^4.4.5" -"@mui/private-theming@^6.1.8": - version "6.1.8" - resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-6.1.8.tgz#3ac194689bd4a4bb79816508e9fcc00708753d12" - integrity sha512-TuKl7msynCNCVvhX3c0ef1sF0Qb3VHcPs8XOGB/8bdOGBr/ynmIG1yTMjZeiFQXk8yN9fzK/FDEKMFxILNn3wg== +"@mui/private-theming@^6.4.6": + version "6.4.6" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-6.4.6.tgz#77c0b150be94c061b34b34ce00eb60cdfb92837f" + integrity sha512-T5FxdPzCELuOrhpA2g4Pi6241HAxRwZudzAuL9vBvniuB5YU82HCmrARw32AuCiyTfWzbrYGGpZ4zyeqqp9RvQ== dependencies: "@babel/runtime" "^7.26.0" - "@mui/utils" "^6.1.8" + "@mui/utils" "^6.4.6" prop-types "^15.8.1" -"@mui/styled-engine@^6.1.8": - version "6.1.8" - resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-6.1.8.tgz#8e99ff036d171b811eff0f0421c7aae8b0b9ac35" - integrity sha512-ZvEoT0U2nPLSLI+B4by4cVjaZnPT2f20f4JUPkyHdwLv65ZzuoHiTlwyhqX1Ch63p8bcJzKTHQVGisEoMK6PGA== +"@mui/styled-engine@^6.4.6": + version "6.4.6" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-6.4.6.tgz#cd0783adbb066a349e1995f0e1a7b8c3c2d59738" + integrity sha512-vSWYc9ZLX46be5gP+FCzWVn5rvDr4cXC5JBZwSIkYk9xbC7GeV+0kCvB8Q6XLFQJy+a62bbqtmdwS4Ghi9NBlQ== dependencies: "@babel/runtime" "^7.26.0" - "@emotion/cache" "^11.13.1" - "@emotion/serialize" "^1.3.2" + "@emotion/cache" "^11.13.5" + "@emotion/serialize" "^1.3.3" "@emotion/sheet" "^1.4.0" csstype "^3.1.3" prop-types "^15.8.1" -"@mui/system@^6.1.8": - version "6.1.8" - resolved 
"https://registry.yarnpkg.com/@mui/system/-/system-6.1.8.tgz#8ec7eabbb40a6888ebc526fd919b8f3a9d8eeece" - integrity sha512-i1kLfQoWxzFpXTBQIuPoA3xKnAnP3en4I2T8xIolovSolGQX5k8vGjw1JaydQS40td++cFsgCdEU458HDNTGUA== +"@mui/system@^6.4.7": + version "6.4.7" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-6.4.7.tgz#a4a8e541a2f1efef1c85a338723aa2f2d0a31e8e" + integrity sha512-7wwc4++Ak6tGIooEVA9AY7FhH2p9fvBMORT4vNLMAysH3Yus/9B9RYMbrn3ANgsOyvT3Z7nE+SP8/+3FimQmcg== dependencies: "@babel/runtime" "^7.26.0" - "@mui/private-theming" "^6.1.8" - "@mui/styled-engine" "^6.1.8" - "@mui/types" "^7.2.19" - "@mui/utils" "^6.1.8" + "@mui/private-theming" "^6.4.6" + "@mui/styled-engine" "^6.4.6" + "@mui/types" "^7.2.21" + "@mui/utils" "^6.4.6" clsx "^2.1.1" csstype "^3.1.3" prop-types "^15.8.1" -"@mui/types@^7.2.19": - version "7.2.19" - resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.19.tgz#c941954dd24393fdce5f07830d44440cf4ab6c80" - integrity sha512-6XpZEM/Q3epK9RN8ENoXuygnqUQxE+siN/6rGRi2iwJPgBUR25mphYQ9ZI87plGh58YoZ5pp40bFvKYOCDJ3tA== +"@mui/types@^7.2.21": + version "7.2.21" + resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.21.tgz#63f50874eda8e4a021a69aaa8ba9597369befda2" + integrity sha512-6HstngiUxNqLU+/DPqlUJDIPbzUBxIVHb1MmXP0eTWDIROiCR2viugXpEif0PPe2mLqqakPzzRClWAnK+8UJww== -"@mui/utils@^6.1.8": - version "6.1.8" - resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-6.1.8.tgz#ae07cad4f6099eeb43dbc71267b4a96304ba3982" - integrity sha512-O2DWb1kz8hiANVcR7Z4gOB3SvPPsSQGUmStpyBDzde6dJIfBzgV9PbEQOBZd3EBsd1pB+Uv1z5LAJAbymmawrA== +"@mui/utils@^6.4.1", "@mui/utils@^6.4.6": + version "6.4.6" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-6.4.6.tgz#307828bee501d30ed5cd1e339ca28c9efcc4e3f9" + integrity sha512-43nZeE1pJF2anGafNydUcYFPtHwAqiBiauRtaMvurdrZI3YrUjHkAu43RBsxef7OFtJMXGiHFvq43kb7lig0sA== dependencies: "@babel/runtime" "^7.26.0" - "@mui/types" "^7.2.19" - "@types/prop-types" "^15.7.13" + "@mui/types" "^7.2.21" + 
"@types/prop-types" "^15.7.14" clsx "^2.1.1" prop-types "^15.8.1" - react-is "^18.3.1" + react-is "^19.0.0" "@nodelib/fs.scandir@2.1.5": version "2.1.5" @@ -2830,94 +2540,94 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" -"@parcel/watcher-android-arm64@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz#e32d3dda6647791ee930556aee206fcd5ea0fb7a" - integrity sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ== - -"@parcel/watcher-darwin-arm64@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz#0d9e680b7e9ec1c8f54944f1b945aa8755afb12f" - integrity sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw== - -"@parcel/watcher-darwin-x64@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz#f9f1d5ce9d5878d344f14ef1856b7a830c59d1bb" - integrity sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA== - -"@parcel/watcher-freebsd-x64@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz#2b77f0c82d19e84ff4c21de6da7f7d096b1a7e82" - integrity sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw== - -"@parcel/watcher-linux-arm-glibc@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz#92ed322c56dbafa3d2545dcf2803334aee131e42" - integrity sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA== - -"@parcel/watcher-linux-arm-musl@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz#cd48e9bfde0cdbbd2ecd9accfc52967e22f849a4" - 
integrity sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA== - -"@parcel/watcher-linux-arm64-glibc@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz#7b81f6d5a442bb89fbabaf6c13573e94a46feb03" - integrity sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA== - -"@parcel/watcher-linux-arm64-musl@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz#dcb8ff01077cdf59a18d9e0a4dff7a0cfe5fd732" - integrity sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q== - -"@parcel/watcher-linux-x64-glibc@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.0.tgz#2e254600fda4e32d83942384d1106e1eed84494d" - integrity sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw== - -"@parcel/watcher-linux-x64-musl@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.0.tgz#01fcea60fedbb3225af808d3f0a7b11229792eef" - integrity sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA== - -"@parcel/watcher-win32-arm64@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz#87cdb16e0783e770197e52fb1dc027bb0c847154" - integrity sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig== - -"@parcel/watcher-win32-ia32@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz#778c39b56da33e045ba21c678c31a9f9d7c6b220" - integrity 
sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA== - -"@parcel/watcher-win32-x64@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz#33873876d0bbc588aacce38e90d1d7480ce81cb7" - integrity sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw== +"@parcel/watcher-android-arm64@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.1.tgz#507f836d7e2042f798c7d07ad19c3546f9848ac1" + integrity sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA== + +"@parcel/watcher-darwin-arm64@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.1.tgz#3d26dce38de6590ef79c47ec2c55793c06ad4f67" + integrity sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw== + +"@parcel/watcher-darwin-x64@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.1.tgz#99f3af3869069ccf774e4ddfccf7e64fd2311ef8" + integrity sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg== + +"@parcel/watcher-freebsd-x64@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.1.tgz#14d6857741a9f51dfe51d5b08b7c8afdbc73ad9b" + integrity sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ== + +"@parcel/watcher-linux-arm-glibc@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.1.tgz#43c3246d6892381db473bb4f663229ad20b609a1" + integrity sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA== + 
+"@parcel/watcher-linux-arm-musl@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.1.tgz#663750f7090bb6278d2210de643eb8a3f780d08e" + integrity sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q== + +"@parcel/watcher-linux-arm64-glibc@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.1.tgz#ba60e1f56977f7e47cd7e31ad65d15fdcbd07e30" + integrity sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w== + +"@parcel/watcher-linux-arm64-musl@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.1.tgz#f7fbcdff2f04c526f96eac01f97419a6a99855d2" + integrity sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg== + +"@parcel/watcher-linux-x64-glibc@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz#4d2ea0f633eb1917d83d483392ce6181b6a92e4e" + integrity sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A== + +"@parcel/watcher-linux-x64-musl@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.1.tgz#277b346b05db54f55657301dd77bdf99d63606ee" + integrity sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg== + +"@parcel/watcher-win32-arm64@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.1.tgz#7e9e02a26784d47503de1d10e8eab6cceb524243" + integrity sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw== + +"@parcel/watcher-win32-ia32@2.5.1": + version "2.5.1" + resolved 
"https://registry.yarnpkg.com/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.1.tgz#2d0f94fa59a873cdc584bf7f6b1dc628ddf976e6" + integrity sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ== + +"@parcel/watcher-win32-x64@2.5.1": + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.1.tgz#ae52693259664ba6f2228fa61d7ee44b64ea0947" + integrity sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA== "@parcel/watcher@^2.4.1": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@parcel/watcher/-/watcher-2.5.0.tgz#5c88818b12b8de4307a9d3e6dc3e28eba0dfbd10" - integrity sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ== + version "2.5.1" + resolved "https://registry.yarnpkg.com/@parcel/watcher/-/watcher-2.5.1.tgz#342507a9cfaaf172479a882309def1e991fb1200" + integrity sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg== dependencies: detect-libc "^1.0.3" is-glob "^4.0.3" micromatch "^4.0.5" node-addon-api "^7.0.0" optionalDependencies: - "@parcel/watcher-android-arm64" "2.5.0" - "@parcel/watcher-darwin-arm64" "2.5.0" - "@parcel/watcher-darwin-x64" "2.5.0" - "@parcel/watcher-freebsd-x64" "2.5.0" - "@parcel/watcher-linux-arm-glibc" "2.5.0" - "@parcel/watcher-linux-arm-musl" "2.5.0" - "@parcel/watcher-linux-arm64-glibc" "2.5.0" - "@parcel/watcher-linux-arm64-musl" "2.5.0" - "@parcel/watcher-linux-x64-glibc" "2.5.0" - "@parcel/watcher-linux-x64-musl" "2.5.0" - "@parcel/watcher-win32-arm64" "2.5.0" - "@parcel/watcher-win32-ia32" "2.5.0" - "@parcel/watcher-win32-x64" "2.5.0" + "@parcel/watcher-android-arm64" "2.5.1" + "@parcel/watcher-darwin-arm64" "2.5.1" + "@parcel/watcher-darwin-x64" "2.5.1" + "@parcel/watcher-freebsd-x64" "2.5.1" + "@parcel/watcher-linux-arm-glibc" "2.5.1" + "@parcel/watcher-linux-arm-musl" "2.5.1" + 
"@parcel/watcher-linux-arm64-glibc" "2.5.1" + "@parcel/watcher-linux-arm64-musl" "2.5.1" + "@parcel/watcher-linux-x64-glibc" "2.5.1" + "@parcel/watcher-linux-x64-musl" "2.5.1" + "@parcel/watcher-win32-arm64" "2.5.1" + "@parcel/watcher-win32-ia32" "2.5.1" + "@parcel/watcher-win32-x64" "2.5.1" "@pnpm/config.env-replace@^1.1.0": version "1.1.0" @@ -3182,9 +2892,9 @@ integrity sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw== "@types/express-serve-static-core@*", "@types/express-serve-static-core@^5.0.0": - version "5.0.2" - resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-5.0.2.tgz#812d2871e5eea17fb0bd5214dda7a7b748c0e12a" - integrity sha512-vluaspfvWEtE4vcSDlKRNer52DvOGrB2xv6diXy6UKyKW0lqZiWHGNApSyxOv+8DE5Z27IzVvE7hNkxg7EXIcg== + version "5.0.6" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-5.0.6.tgz#41fec4ea20e9c7b22f024ab88a95c6bb288f51b8" + integrity sha512-3xhRnjJPkULekpSzgtoNYYcTWgEZkp4myc+Saevii5JPnHNvHMRlBSHDbs7Bh1iPPoVTERHEZXyhyLbMEsExsA== dependencies: "@types/node" "*" "@types/qs" "*" @@ -3254,9 +2964,9 @@ integrity sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA== "@types/http-proxy@^1.17.8": - version "1.17.15" - resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.15.tgz#12118141ce9775a6499ecb4c01d02f90fc839d36" - integrity sha512-25g5atgiVNTIv0LBDTg1H74Hvayx0ajtJPLLcYE3whFv75J0pWNtOBzaXJQgDTmrX1bx5U9YC2w/n65BN1HwRQ== + version "1.17.16" + resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.16.tgz#dee360707b35b3cc85afcde89ffeebff7d7f9240" + integrity sha512-sdWoUajOB1cd0A8cRRQ1cfyWNbmFKLAqBB89Y8x5iYyG/mkJHc0YUH8pdWBy2omi9qtCpiIgGjuwO0dQST2l5w== dependencies: "@types/node" "*" @@ -3302,9 +3012,9 @@ integrity sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w== 
"@types/ms@*": - version "0.7.34" - resolved "https://registry.yarnpkg.com/@types/ms/-/ms-0.7.34.tgz#10964ba0dee6ac4cd462e2795b6bebd407303433" - integrity sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g== + version "2.1.0" + resolved "https://registry.yarnpkg.com/@types/ms/-/ms-2.1.0.tgz#052aa67a48eccc4309d7f0191b7e41434b90bb78" + integrity sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA== "@types/node-forge@^1.3.0": version "1.3.11" @@ -3314,11 +3024,11 @@ "@types/node" "*" "@types/node@*", "@types/node@^22.5.5": - version "22.9.3" - resolved "https://registry.yarnpkg.com/@types/node/-/node-22.9.3.tgz#08f3d64b3bc6d74b162d36f60213e8a6704ef2b4" - integrity sha512-F3u1fs/fce3FFk+DAxbxc78DF8x0cY09RRL8GnXLmkJ1jvx3TtPdWoTT5/NiYfI5ASqXBmfqJi9dZ3gxMx4lzw== + version "22.13.10" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.13.10.tgz#df9ea358c5ed991266becc3109dc2dc9125d77e4" + integrity sha512-I6LPUvlRH+O6VRUqYOcMudhaIdUVWfsjnZavnsraHvpBwaEyMN29ry+0UVJhImYL16xsscu0aske3yA+uPOWfw== dependencies: - undici-types "~6.19.8" + undici-types "~6.20.0" "@types/node@^17.0.5": version "17.0.45" @@ -3349,15 +3059,15 @@ resolved "https://registry.yarnpkg.com/@types/prismjs/-/prismjs-1.26.5.tgz#72499abbb4c4ec9982446509d2f14fb8483869d6" integrity sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ== -"@types/prop-types@*", "@types/prop-types@^15.7.13": - version "15.7.13" - resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.13.tgz#2af91918ee12d9d32914feb13f5326658461b451" - integrity sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA== +"@types/prop-types@^15.7.14": + version "15.7.14" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.14.tgz#1433419d73b2a7ebfc6918dcefd2ec0d5cd698f2" + integrity 
sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ== "@types/qs@*": - version "6.9.17" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.17.tgz#fc560f60946d0aeff2f914eb41679659d3310e1a" - integrity sha512-rX4/bPcfmvxHDv0XjfJELTTr+iB+tn032nPILqHm5wbthUUUuVtNGGqzhya9XUxjTP8Fpr0qYgSZZKxGY++svQ== + version "6.9.18" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.18.tgz#877292caa91f7c1b213032b34626505b746624c2" + integrity sha512-kK7dgTYDyGqS+e2Q4aK9X3D7q234CIZ1Bv0q/7Z5IwRDoADNU81xXJK/YVyLbLTZCoIwUoDoffFeF+p/eIklAA== "@types/range-parser@*": version "1.2.7" @@ -3390,19 +3100,16 @@ "@types/history" "^4.7.11" "@types/react" "*" -"@types/react-transition-group@^4.4.11": - version "4.4.11" - resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.11.tgz#d963253a611d757de01ebb241143b1017d5d63d5" - integrity sha512-RM05tAniPZ5DZPzzNFP+DmrcOdD0efDUxMy3145oljWSl3x9ZV5vhme98gTxFrj2lhXvmGNnUiuDyJgY9IKkNA== - dependencies: - "@types/react" "*" +"@types/react-transition-group@^4.4.12": + version "4.4.12" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.12.tgz#b5d76568485b02a307238270bfe96cb51ee2a044" + integrity sha512-8TV6R3h2j7a91c+1DXdJi3Syo69zzIZbz7Lg5tORM5LEJG7X/E6a1V3drRyBRZq7/utz7A+c4OgYLiLcYGHG6w== "@types/react@*": - version "18.3.12" - resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.12.tgz#99419f182ccd69151813b7ee24b792fe08774f60" - integrity sha512-D2wOSq/d6Agt28q7rSI3jhU7G6aiuzljDGZ2hTZHIkrTLUI+AF3WMeKkEZ9nN2fkBAlcktT6vcZjDFiIhMYEQw== + version "19.0.10" + resolved "https://registry.yarnpkg.com/@types/react/-/react-19.0.10.tgz#d0c66dafd862474190fe95ce11a68de69ed2b0eb" + integrity sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g== dependencies: - "@types/prop-types" "*" csstype "^3.0.2" "@types/retry@0.12.0": @@ -3468,9 +3175,9 @@ webpack "^5" 
"@types/ws@^8.5.5": - version "8.5.13" - resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.13.tgz#6414c280875e2691d0d1e080b05addbf5cb91e20" - integrity sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA== + version "8.18.0" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.18.0.tgz#8a2ec491d6f0685ceaab9a9b7ff44146236993b5" + integrity sha512-8svvI3hMyvN0kKCJMvTJP/x6Y/EoQbepff882wL+Sn5QsXb3etnamgrJq4isrBxSJj5L2AuXcI0+bgkoAXGUJw== dependencies: "@types/node" "*" @@ -3487,11 +3194,11 @@ "@types/yargs-parser" "*" "@ungap/structured-clone@^1.0.0": - version "1.2.0" - resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406" - integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ== + version "1.3.0" + resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.3.0.tgz#d06bbb384ebcf6c505fde1c3d0ed4ddffe0aaff8" + integrity sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g== -"@webassemblyjs/ast@1.14.1", "@webassemblyjs/ast@^1.12.1", "@webassemblyjs/ast@^1.14.1": +"@webassemblyjs/ast@1.14.1", "@webassemblyjs/ast@^1.14.1": version "1.14.1" resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.14.1.tgz#a9f6a07f2b03c95c8d38c4536a1fdfb521ff55b6" integrity sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ== @@ -3557,7 +3264,7 @@ resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.13.2.tgz#917a20e93f71ad5602966c2d685ae0c6c21f60f1" integrity sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ== -"@webassemblyjs/wasm-edit@^1.12.1", "@webassemblyjs/wasm-edit@^1.14.1": +"@webassemblyjs/wasm-edit@^1.14.1": version "1.14.1" resolved 
"https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz#ac6689f502219b59198ddec42dcd496b1004d597" integrity sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ== @@ -3592,7 +3299,7 @@ "@webassemblyjs/wasm-gen" "1.14.1" "@webassemblyjs/wasm-parser" "1.14.1" -"@webassemblyjs/wasm-parser@1.14.1", "@webassemblyjs/wasm-parser@^1.12.1", "@webassemblyjs/wasm-parser@^1.14.1": +"@webassemblyjs/wasm-parser@1.14.1", "@webassemblyjs/wasm-parser@^1.14.1": version "1.14.1" resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz#b3e13f1893605ca78b52c68e54cf6a865f90b9fb" integrity sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ== @@ -3643,9 +3350,9 @@ acorn-walk@^8.0.0: acorn "^8.11.0" acorn@^8.0.0, acorn@^8.0.4, acorn@^8.11.0, acorn@^8.14.0, acorn@^8.8.2: - version "8.14.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.14.0.tgz#063e2c70cac5fb4f6467f0b11152e04c682795b0" - integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== + version "8.14.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.14.1.tgz#721d5dc10f7d5b5609a891773d47731796935dfb" + integrity sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg== address@^1.0.1, address@^1.1.2: version "1.2.2" @@ -3700,9 +3407,9 @@ ajv@^8.0.0, ajv@^8.9.0: require-from-string "^2.0.2" algoliasearch-helper@^3.13.3: - version "3.22.5" - resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz#2fcc26814e10a121a2c2526a1b05c754061c56c0" - integrity sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw== + version "3.24.2" + resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.24.2.tgz#332f9813b63442b13b8eaae19f313fe3db1047af" + integrity 
sha512-vBw/INZDfyh/THbVeDy8On8lZqd2qiUAHde5N4N1ygL4SoeLqLGJ4GHneHrDAYsjikRwTTtodEP0fiXl5MxHFQ== dependencies: "@algolia/events" "^4.0.1" @@ -3727,24 +3434,24 @@ algoliasearch@^4.18.0: "@algolia/requester-node-http" "4.24.0" "@algolia/transporter" "4.24.0" -algoliasearch@^5.12.0: - version "5.15.0" - resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-5.15.0.tgz#09cef5a2555c4554b37a99f0488ea6ab2347e625" - integrity sha512-Yf3Swz1s63hjvBVZ/9f2P1Uu48GjmjCN+Esxb6MAONMGtZB1fRX8/S1AhUTtsuTlcGovbYLxpHgc7wEzstDZBw== - dependencies: - "@algolia/client-abtesting" "5.15.0" - "@algolia/client-analytics" "5.15.0" - "@algolia/client-common" "5.15.0" - "@algolia/client-insights" "5.15.0" - "@algolia/client-personalization" "5.15.0" - "@algolia/client-query-suggestions" "5.15.0" - "@algolia/client-search" "5.15.0" - "@algolia/ingestion" "1.15.0" - "@algolia/monitoring" "1.15.0" - "@algolia/recommend" "5.15.0" - "@algolia/requester-browser-xhr" "5.15.0" - "@algolia/requester-fetch" "5.15.0" - "@algolia/requester-node-http" "5.15.0" +algoliasearch@^5.14.2: + version "5.21.0" + resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-5.21.0.tgz#0517971eba0c03efda8586213294a554db2d3ac9" + integrity sha512-hexLq2lSO1K5SW9j21Ubc+q9Ptx7dyRTY7se19U8lhIlVMLCNXWCyQ6C22p9ez8ccX0v7QVmwkl2l1CnuGoO2Q== + dependencies: + "@algolia/client-abtesting" "5.21.0" + "@algolia/client-analytics" "5.21.0" + "@algolia/client-common" "5.21.0" + "@algolia/client-insights" "5.21.0" + "@algolia/client-personalization" "5.21.0" + "@algolia/client-query-suggestions" "5.21.0" + "@algolia/client-search" "5.21.0" + "@algolia/ingestion" "1.21.0" + "@algolia/monitoring" "1.21.0" + "@algolia/recommend" "5.21.0" + "@algolia/requester-browser-xhr" "5.21.0" + "@algolia/requester-fetch" "5.21.0" + "@algolia/requester-node-http" "5.21.0" ansi-align@^3.0.1: version "3.0.1" @@ -3833,15 +3540,15 @@ at-least-node@^1.0.0: integrity 
sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== autoprefixer@^10.4.14, autoprefixer@^10.4.19: - version "10.4.20" - resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.20.tgz#5caec14d43976ef42e32dcb4bd62878e96be5b3b" - integrity sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g== + version "10.4.21" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.21.tgz#77189468e7a8ad1d9a37fbc08efc9f480cf0a95d" + integrity sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ== dependencies: - browserslist "^4.23.3" - caniuse-lite "^1.0.30001646" + browserslist "^4.24.4" + caniuse-lite "^1.0.30001702" fraction.js "^4.3.7" normalize-range "^0.1.2" - picocolors "^1.0.1" + picocolors "^1.1.1" postcss-value-parser "^4.2.0" babel-loader@^9.1.3, babel-loader@^9.2.1: @@ -3877,14 +3584,6 @@ babel-plugin-polyfill-corejs2@^0.4.10: "@babel/helper-define-polyfill-provider" "^0.6.3" semver "^6.3.1" -babel-plugin-polyfill-corejs3@^0.10.6: - version "0.10.6" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz#2deda57caef50f59c525aeb4964d3b2f867710c7" - integrity sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA== - dependencies: - "@babel/helper-define-polyfill-provider" "^0.6.2" - core-js-compat "^3.38.0" - babel-plugin-polyfill-corejs3@^0.11.0: version "0.11.1" resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.11.1.tgz#4e4e182f1bb37c7ba62e2af81d8dd09df31344f6" @@ -3999,17 +3698,7 @@ braces@^3.0.3, braces@~3.0.2: dependencies: fill-range "^7.1.1" -browserslist@^4.0.0, browserslist@^4.18.1, browserslist@^4.23.0, browserslist@^4.23.3, browserslist@^4.24.0, browserslist@^4.24.2: - version "4.24.2" - resolved 
"https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.2.tgz#f5845bc91069dbd55ee89faf9822e1d885d16580" - integrity sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg== - dependencies: - caniuse-lite "^1.0.30001669" - electron-to-chromium "^1.5.41" - node-releases "^2.0.18" - update-browserslist-db "^1.1.1" - -browserslist@^4.24.4: +browserslist@^4.0.0, browserslist@^4.18.1, browserslist@^4.23.0, browserslist@^4.24.0, browserslist@^4.24.4: version "4.24.4" resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.4.tgz#c6b2865a3f08bcb860a0e827389003b9fe686e4b" integrity sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A== @@ -4052,16 +3741,31 @@ cacheable-request@^10.2.8: normalize-url "^8.0.0" responselike "^3.0.0" -call-bind@^1.0.5, call-bind@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" - integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== +call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== dependencies: - es-define-property "^1.0.0" es-errors "^1.3.0" function-bind "^1.1.2" + +call-bind@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.8.tgz#0736a9660f537e3388826f440d5ec45f744eaa4c" + integrity sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww== + dependencies: + call-bind-apply-helpers "^1.0.0" + es-define-property "^1.0.0" get-intrinsic "^1.2.4" - set-function-length "^1.2.1" + set-function-length "^1.2.2" + 
+call-bound@^1.0.2, call-bound@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/call-bound/-/call-bound-1.0.4.tgz#238de935d2a2a692928c538c7ccfa91067fd062a" + integrity sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg== + dependencies: + call-bind-apply-helpers "^1.0.2" + get-intrinsic "^1.3.0" callsites@^3.0.0: version "3.1.0" @@ -4096,12 +3800,7 @@ caniuse-api@^3.0.0: lodash.memoize "^4.1.2" lodash.uniq "^4.5.0" -caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001646, caniuse-lite@^1.0.30001669: - version "1.0.30001684" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001684.tgz#0eca437bab7d5f03452ff0ef9de8299be6b08e16" - integrity sha512-G1LRwLIQjBQoyq0ZJGqGIJUXzJ8irpbjHLpVRXDvBEScFJ9b17sgK6vlx0GAJFE21okD7zXl08rRRUfq6HdoEQ== - -caniuse-lite@^1.0.30001688: +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001688, caniuse-lite@^1.0.30001702: version "1.0.30001703" resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001703.tgz#977cb4920598c158f491ecf4f4f2cfed9e354718" integrity sha512-kRlAGTRWgPsOj7oARC9m1okJEXdL/8fekFVcxA8Hl7GH4r/sN4OJn/i6Flde373T50KS7Y37oFbMwlE8+F42kQ== @@ -4120,9 +3819,9 @@ chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: supports-color "^7.1.0" chalk@^5.0.1, chalk@^5.2.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" - integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== + version "5.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.4.1.tgz#1b48bf0963ec158dce2aacf69c093ae2dd2092d8" + integrity sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w== char-regex@^1.0.2: version "1.0.2" @@ -4190,9 +3889,9 @@ chokidar@^3.4.2, chokidar@^3.5.3: fsevents "~2.3.2" chokidar@^4.0.0: - version "4.0.1" - resolved 
"https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.1.tgz#4a6dff66798fb0f72a94f616abbd7e1a19f31d41" - integrity sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA== + version "4.0.3" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.3.tgz#7be37a4c03c9aee1ecfe862a4a23b2c70c205d30" + integrity sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA== dependencies: readdirp "^4.0.1" @@ -4321,9 +4020,9 @@ compressible@~2.0.18: mime-db ">= 1.43.0 < 2" compression@^1.7.4: - version "1.7.5" - resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.5.tgz#fdd256c0a642e39e314c478f6c2cd654edd74c93" - integrity sha512-bQJ0YRck5ak3LgtnpKkiabX5pNF7tMUh1BSy2ZBOTh0Dim0BUu6aPPwByIns6/A5Prh8PufSPerMDUklpzes2Q== + version "1.8.0" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.8.0.tgz#09420efc96e11a0f44f3a558de59e321364180f7" + integrity sha512-k6WLKfunuqCYD3t6AsuPGvQWaKwuLLh2/xHNcX4qE+vIfDNXpSqnrhwA7O53R7WVQUnt8dVAIW+YHr7xTgOgGA== dependencies: bytes "3.1.2" compressible "~2.0.18" @@ -4433,13 +4132,6 @@ copy-webpack-plugin@^11.0.0: schema-utils "^4.0.0" serialize-javascript "^6.0.0" -core-js-compat@^3.38.0, core-js-compat@^3.38.1: - version "3.39.0" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.39.0.tgz#b12dccb495f2601dc860bdbe7b4e3ffa8ba63f61" - integrity sha512-VgEUx3VwlExr5no0tXlBt+silBvhTryPwCXRI2Id1PN8WTKu7MreethvddqOubrYxkFdv/RnYrqlv1sFNAUelw== - dependencies: - browserslist "^4.24.2" - core-js-compat@^3.40.0: version "3.41.0" resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.41.0.tgz#4cdfce95f39a8f27759b667cf693d96e5dda3d17" @@ -4448,14 +4140,14 @@ core-js-compat@^3.40.0: browserslist "^4.24.4" core-js-pure@^3.30.2: - version "3.39.0" - resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.39.0.tgz#aa0d54d70a15bdc13e7c853db87c10abc30d68f3" - integrity 
sha512-7fEcWwKI4rJinnK+wLTezeg2smbFFdSBP6E2kQZNbnzM2s1rpKQ6aaRteZSSg7FLU3P0HGGVo/gbpfanU36urg== + version "3.41.0" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.41.0.tgz#349fecad168d60807a31e83c99d73d786fe80811" + integrity sha512-71Gzp96T9YPk63aUvE5Q5qP+DryB4ZloUZPSOebGM88VNw8VNfvdA7z6kGA8iGOTEzAomsRidp4jXSmUIJsL+Q== core-js@^3.31.1: - version "3.39.0" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.39.0.tgz#57f7647f4d2d030c32a72ea23a0555b2eaa30f83" - integrity sha512-raM0ew0/jJUqkJ0E6e8UDtl+y/7ktFivgWvqw8dNSQeNWoSDLvQ1H/RN3aPXB9tBd4/FhyR4RDPGhsNIMsAn7g== + version "3.41.0" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.41.0.tgz#57714dafb8c751a6095d028a7428f1fb5834a776" + integrity sha512-SJ4/EHwS36QMJd6h/Rg+GyR4A5xE0FSI3eZ+iBVpfqf1x0eTSg1smWLHrA+2jQThZSh97fmSgFSU8B61nxosxA== core-util-is@~1.0.0: version "1.0.3" @@ -4702,16 +4394,16 @@ debug@2.6.9, debug@^2.6.0: ms "2.0.0" debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2: - version "4.3.7" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" - integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + version "4.4.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" + integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== dependencies: ms "^2.1.3" decode-named-character-reference@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz#daabac9690874c394c81e4162a0304b35d824f0e" - integrity sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg== + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/decode-named-character-reference/-/decode-named-character-reference-1.1.0.tgz#5d6ce68792808901210dac42a8e9853511e2b8bf" + integrity sha512-Wy+JTSbFThEOXQIR2L6mxJvEs+veIzpmqD7ynWxMXGpnk3smkHQOp6forLdHsKpAMW9iJpaBBIxz285t1n1C3w== dependencies: character-entities "^2.0.0" @@ -4849,11 +4541,11 @@ dns-packet@^5.2.2: "@leichtgewicht/ip-codec" "^2.0.1" docusaurus-plugin-sass@^0.2.5: - version "0.2.5" - resolved "https://registry.yarnpkg.com/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.5.tgz#6bfb8a227ac6265be685dcbc24ba1989e27b8005" - integrity sha512-Z+D0fLFUKcFpM+bqSUmqKIU+vO+YF1xoEQh5hoFreg2eMf722+siwXDD+sqtwU8E4MvVpuvsQfaHwODNlxJAEg== + version "0.2.6" + resolved "https://registry.yarnpkg.com/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.6.tgz#b4930a1fe1cc7bcead639bb1bee38bce0ffd073d" + integrity sha512-2hKQQDkrufMong9upKoG/kSHJhuwd+FA3iAe/qzS/BmWpbIpe7XKmq5wlz4J5CJaOPu4x+iDJbgAxZqcoQf0kg== dependencies: - sass-loader "^10.1.1" + sass-loader "^16.0.2" dom-converter@^0.2.0: version "0.2.0" @@ -4917,9 +4609,9 @@ domutils@^2.5.2, domutils@^2.8.0: domhandler "^4.2.0" domutils@^3.0.1: - version "3.1.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.1.0.tgz#c47f551278d3dc4b0b1ab8cbb42d751a6f0d824e" - integrity sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA== + version "3.2.2" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.2.2.tgz#edbfe2b668b0c1d97c24baf0f1062b132221bc78" + integrity sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw== dependencies: dom-serializer "^2.0.0" domelementtype "^2.3.0" @@ -4941,9 +4633,18 @@ dot-prop@^6.0.1: is-obj "^2.0.0" dotenv@^16.4.2: - version "16.4.5" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f" - integrity sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg== + version 
"16.4.7" + resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.7.tgz#0e20c5b82950140aa99be360a8a5f52335f53c26" + integrity sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ== + +dunder-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== + dependencies: + call-bind-apply-helpers "^1.0.1" + es-errors "^1.3.0" + gopd "^1.2.0" duplexer@^0.1.2: version "0.1.2" @@ -4960,11 +4661,6 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== -electron-to-chromium@^1.5.41: - version "1.5.64" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.64.tgz#ac8c4c89075d35a1514b620f47dfe48a71ec3697" - integrity sha512-IXEuxU+5ClW2IGEYFC2T7szbyVgehupCWQe5GNh+H065CD6U6IFN0s4KeAMFGNmQolRU4IV7zGBWSYMmZ8uuqQ== - electron-to-chromium@^1.5.73: version "1.5.114" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.114.tgz#f2bb4fda80a7db4ea273565e75b0ebbe19af0ac3" @@ -5006,9 +4702,9 @@ encodeurl@~2.0.0: integrity sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg== enhanced-resolve@^5.17.1: - version "5.17.1" - resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" - integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg== + version "5.18.1" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz#728ab082f8b7b6836de51f1637aab5d3b9568faf" + integrity 
sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg== dependencies: graceful-fs "^4.2.4" tapable "^2.2.0" @@ -5037,12 +4733,10 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" -es-define-property@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" - integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== - dependencies: - get-intrinsic "^1.2.4" +es-define-property@^1.0.0, es-define-property@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== es-errors@^1.3.0: version "1.3.0" @@ -5050,9 +4744,16 @@ es-errors@^1.3.0: integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== es-module-lexer@^1.2.1: - version "1.5.4" - resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.5.4.tgz#a8efec3a3da991e60efa6b633a7cad6ab8d26b78" - integrity sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw== + version "1.6.0" + resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.6.0.tgz#da49f587fd9e68ee2404fe4e256c0c7d3a81be21" + integrity sha512-qqnD1yMU6tk/jnaMosogGySTZP8YtUgAffA9nMN+E/rjxcfRQ6IEk7IiozUjgxKoFHBGjTLnrHB/YC45r/59EQ== + +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== + dependencies: + es-errors "^1.3.0" esast-util-from-estree@^2.0.0: version "2.0.0" @@ -5174,9 +4875,9 @@ 
estree-util-to-js@^2.0.0: source-map "^0.7.0" estree-util-value-to-estree@^3.0.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/estree-util-value-to-estree/-/estree-util-value-to-estree-3.2.1.tgz#f8083e56f51efb4889794490730c036ba6167ee6" - integrity sha512-Vt2UOjyPbNQQgT5eJh+K5aATti0OjCIAGc9SgMdOFYbohuifsWclR74l0iZTJwePMgWYdX1hlVS+dedH9XV8kw== + version "3.3.2" + resolved "https://registry.yarnpkg.com/estree-util-value-to-estree/-/estree-util-value-to-estree-3.3.2.tgz#75bb2263850b6f5ac35edd343929c36b51a69806" + integrity sha512-hYH1aSvQI63Cvq3T3loaem6LW4u72F187zW4FHpTrReJSm6W66vYTFNO1vH/chmcOulp1HlAj1pxn8Ag0oXI5Q== dependencies: "@types/estree" "^1.0.0" @@ -5244,9 +4945,9 @@ execa@^5.0.0: strip-final-newline "^2.0.0" express@^4.17.3: - version "4.21.1" - resolved "https://registry.yarnpkg.com/express/-/express-4.21.1.tgz#9dae5dda832f16b4eec941a4e44aa89ec481b281" - integrity sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ== + version "4.21.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.21.2.tgz#cf250e48362174ead6cea4a566abef0162c1ec32" + integrity sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA== dependencies: accepts "~1.3.8" array-flatten "1.1.1" @@ -5267,7 +4968,7 @@ express@^4.17.3: methods "~1.1.2" on-finished "2.4.1" parseurl "~1.3.3" - path-to-regexp "0.1.10" + path-to-regexp "0.1.12" proxy-addr "~2.0.7" qs "6.13.0" range-parser "~1.2.1" @@ -5298,15 +4999,15 @@ fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== fast-glob@^3.2.11, fast-glob@^3.2.9, fast-glob@^3.3.0: - version "3.3.2" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" - integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== + version "3.3.3" + resolved 
"https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" + integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== dependencies: "@nodelib/fs.stat" "^2.0.2" "@nodelib/fs.walk" "^1.2.3" glob-parent "^5.1.2" merge2 "^1.3.0" - micromatch "^4.0.4" + micromatch "^4.0.8" fast-json-stable-stringify@^2.0.0: version "2.1.0" @@ -5314,14 +5015,14 @@ fast-json-stable-stringify@^2.0.0: integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== fast-uri@^3.0.1: - version "3.0.3" - resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.3.tgz#892a1c91802d5d7860de728f18608a0573142241" - integrity sha512-aLrHthzCjH5He4Z2H9YZ+v6Ujb9ocRuW6ZzkJQOrTxleEijANq4v1TsaPaVG1PZcuurEzrLcWRyYBYXD5cEiaw== + version "3.0.6" + resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.6.tgz#88f130b77cfaea2378d56bf970dea21257a68748" + integrity sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw== fastq@^1.6.0: - version "1.17.1" - resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" - integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w== + version "1.19.1" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.19.1.tgz#d50eaba803c8846a883c16492821ebcd2cda55f5" + integrity sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ== dependencies: reusify "^1.0.4" @@ -5477,9 +5178,9 @@ fresh@0.5.2: integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== fs-extra@^11.1.1, fs-extra@^11.2.0: - version "11.2.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.2.0.tgz#e70e17dfad64232287d01929399e0ea7c86b0e5b" - integrity 
sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw== + version "11.3.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.3.0.tgz#0daced136bbaf65a555a326719af931adc7a314d" + integrity sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew== dependencies: graceful-fs "^4.2.0" jsonfile "^6.0.1" @@ -5520,22 +5221,35 @@ gensync@^1.0.0-beta.2: resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== -get-intrinsic@^1.1.3, get-intrinsic@^1.2.4: - version "1.2.4" - resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" - integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== +get-intrinsic@^1.2.4, get-intrinsic@^1.2.5, get-intrinsic@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" es-errors "^1.3.0" + es-object-atoms "^1.1.1" function-bind "^1.1.2" - has-proto "^1.0.1" - has-symbols "^1.0.3" - hasown "^2.0.0" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" get-own-enumerable-property-symbols@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664" integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== +get-proto@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + get-stream@^6.0.0, get-stream@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" @@ -5628,12 +5342,10 @@ globby@^13.1.1: merge2 "^1.4.1" slash "^4.0.0" -gopd@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" - integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== - dependencies: - get-intrinsic "^1.1.3" +gopd@^1.0.1, gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== got@^12.1.0: version "12.6.1" @@ -5696,22 +5408,17 @@ has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: dependencies: es-define-property "^1.0.0" -has-proto@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" - integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== - -has-symbols@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" - integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== +has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity 
sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== has-yarn@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/has-yarn/-/has-yarn-3.0.0.tgz#c3c21e559730d1d3b57e28af1f30d06fac38147d" integrity sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA== -hasown@^2.0.0, hasown@^2.0.2: +hasown@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== @@ -5719,15 +5426,15 @@ hasown@^2.0.0, hasown@^2.0.2: function-bind "^1.1.2" hast-util-from-parse5@^8.0.0: - version "8.0.2" - resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-8.0.2.tgz#29b42758ba96535fd6021f0f533c000886c0f00f" - integrity sha512-SfMzfdAi/zAoZ1KkFEyyeXBn7u/ShQrfd675ZEE9M3qj+PMFX05xubzRyF76CCSJu8au9jgVxDV1+okFvgZU4A== + version "8.0.3" + resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz#830a35022fff28c3fea3697a98c2f4cc6b835a2e" + integrity sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg== dependencies: "@types/hast" "^3.0.0" "@types/unist" "^3.0.0" devlop "^1.0.0" hastscript "^9.0.0" - property-information "^6.0.0" + property-information "^7.0.0" vfile "^6.0.0" vfile-location "^5.0.0" web-namespaces "^2.0.0" @@ -5759,9 +5466,9 @@ hast-util-raw@^9.0.0: zwitch "^2.0.0" hast-util-to-estree@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz#f2afe5e869ddf0cf690c75f9fc699f3180b51b19" - integrity sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw== + version "3.1.3" + resolved "https://registry.yarnpkg.com/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz#e654c1c9374645135695cc0ab9f70b8fcaf733d7" + integrity 
sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w== dependencies: "@types/estree" "^1.0.0" "@types/estree-jsx" "^1.0.0" @@ -5774,16 +5481,16 @@ hast-util-to-estree@^3.0.0: mdast-util-mdx-expression "^2.0.0" mdast-util-mdx-jsx "^3.0.0" mdast-util-mdxjs-esm "^2.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" - style-to-object "^0.4.0" + style-to-js "^1.0.0" unist-util-position "^5.0.0" zwitch "^2.0.0" hast-util-to-jsx-runtime@^2.0.0: - version "2.3.2" - resolved "https://registry.yarnpkg.com/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.2.tgz#6d11b027473e69adeaa00ca4cfb5bb68e3d282fa" - integrity sha512-1ngXYb+V9UT5h+PxNRa1O1FYguZK/XL+gkeqvp7EdHlB9oHUG0eYRo/vY5inBdcqo3RkPMC58/H94HvkbfGdyg== + version "2.3.6" + resolved "https://registry.yarnpkg.com/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz#ff31897aae59f62232e21594eac7ef6b63333e98" + integrity sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg== dependencies: "@types/estree" "^1.0.0" "@types/hast" "^3.0.0" @@ -5795,9 +5502,9 @@ hast-util-to-jsx-runtime@^2.0.0: mdast-util-mdx-expression "^2.0.0" mdast-util-mdx-jsx "^3.0.0" mdast-util-mdxjs-esm "^2.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" - style-to-object "^1.0.0" + style-to-js "^1.0.0" unist-util-position "^5.0.0" vfile-message "^4.0.0" @@ -5822,14 +5529,14 @@ hast-util-whitespace@^3.0.0: "@types/hast" "^3.0.0" hastscript@^9.0.0: - version "9.0.0" - resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-9.0.0.tgz#2b76b9aa3cba8bf6d5280869f6f6f7165c230763" - integrity sha512-jzaLBGavEDKHrc5EfFImKN7nZKKBdSLIdGvCwDZ9TfzbF2ffXiov8CKE445L2Z1Ek2t/m4SKQ2j6Ipv7NyUolw== + version "9.0.1" + resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-9.0.1.tgz#dbc84bef6051d40084342c229c451cd9dc567dff" + integrity 
sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w== dependencies: "@types/hast" "^3.0.0" comma-separated-tokens "^2.0.0" hast-util-parse-selector "^4.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" he@^1.2.0: @@ -5975,9 +5682,9 @@ http-errors@~1.6.2: statuses ">= 1.4.0 < 2" http-parser-js@>=0.5.1: - version "0.5.8" - resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.8.tgz#af23090d9ac4e24573de6f6aecc9d84a48bf20e3" - integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q== + version "0.5.9" + resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.9.tgz#b817b3ca0edea6236225000d795378707c169cec" + integrity sha512-n1XsPy3rXVxlqxVioEWdC+0+M+SQw0DpJynwtOPo1X+ZlvdzTLtDBIJJlDQTnwZIFJrZSzSGmIOUdP8tu+SgLw== http-proxy-middleware@^2.0.3: version "2.0.7" @@ -6037,9 +5744,9 @@ ignore@^5.2.0, ignore@^5.2.4: integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== image-size@^1.0.2: - version "1.1.1" - resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.1.1.tgz#ddd67d4dc340e52ac29ce5f546a09f4e29e840ac" - integrity sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ== + version "1.2.0" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.2.0.tgz#312af27a2ff4ff58595ad00b9344dd684c910df6" + integrity sha512-4S8fwbO6w3GeCVN6OPtA9I5IGKkcDMPcKndtUlpJuCwu7JLjtj7JZpwqLuyY2nrmQT3AWsCJLSKPsc2mPBSl3w== dependencies: queue "6.0.2" @@ -6059,9 +5766,9 @@ immutable@^5.0.2: integrity sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw== import-fresh@^3.1.0, import-fresh@^3.2.1, import-fresh@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" - integrity 
sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + version "3.3.1" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.1.tgz#9cecb56503c0ada1f2741dbbd6546e4b13b57ccf" + integrity sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== dependencies: parent-module "^1.0.0" resolve-from "^4.0.0" @@ -6114,11 +5821,6 @@ ini@^1.3.4, ini@^1.3.5, ini@~1.3.0: resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== -inline-style-parser@0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1" - integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q== - inline-style-parser@0.2.4: version "0.2.4" resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.2.4.tgz#f4af5fe72e612839fcd453d989a586566d695f22" @@ -6178,10 +5880,10 @@ is-ci@^3.0.1: dependencies: ci-info "^3.2.0" -is-core-module@^2.13.0: - version "2.15.1" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.1.tgz#a7363a25bee942fefab0de13bf6aa372c82dcc37" - integrity sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ== +is-core-module@^2.16.0: + version "2.16.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4" + integrity sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w== dependencies: hasown "^2.0.2" @@ -6366,9 +6068,9 @@ jest-worker@^29.4.3: supports-color "^8.0.0" jiti@^1.20.0: - version "1.21.6" - resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.6.tgz#6c7f7398dd4b3142767f9a168af2f317a428d268" - 
integrity sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w== + version "1.21.7" + resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.7.tgz#9dd81043424a3d28458b193d965f0d18a2300ba9" + integrity sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A== joi@^17.9.2: version "17.13.3" @@ -6401,7 +6103,12 @@ js-yaml@^4.1.0: dependencies: argparse "^2.0.1" -jsesc@^3.0.2, jsesc@~3.0.2: +jsesc@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d" + integrity sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA== + +jsesc@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-3.0.2.tgz#bb8b09a6597ba426425f2e4a07245c3d00b9343e" integrity sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g== @@ -6457,11 +6164,6 @@ kleur@^3.0.3: resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -klona@^2.0.4: - version "2.0.6" - resolved "https://registry.yarnpkg.com/klona/-/klona-2.0.6.tgz#85bffbf819c03b2f53270412420a4555ef882e22" - integrity sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA== - latest-version@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-7.0.0.tgz#843201591ea81a4d404932eeb61240fe04e9e5da" @@ -6470,17 +6172,17 @@ latest-version@^7.0.0: package-json "^8.1.0" launch-editor@^2.6.0: - version "2.9.1" - resolved "https://registry.yarnpkg.com/launch-editor/-/launch-editor-2.9.1.tgz#253f173bd441e342d4344b4dae58291abb425047" - integrity sha512-Gcnl4Bd+hRO9P9icCP/RVVT2o8SFlPXofuCxvA2SaZuH45whSvf5p8x5oih5ftLiVhEI4sp5xDY+R+b3zJBh5w== + version "2.10.0" + resolved 
"https://registry.yarnpkg.com/launch-editor/-/launch-editor-2.10.0.tgz#5ca3edfcb9667df1e8721310f3a40f1127d4bc42" + integrity sha512-D7dBRJo/qcGX9xlvt/6wUYzQxjh5G1RvZPgPv8vi4KRU99DVQL/oW7tnVOCCTm2HGeo3C5HvGE5Yrh6UBoZ0vA== dependencies: picocolors "^1.0.0" shell-quote "^1.8.1" less@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/less/-/less-4.2.0.tgz#cbefbfaa14a4cd388e2099b2b51f956e1465c450" - integrity sha512-P3b3HJDBtSzsXUl0im2L7gTO5Ubg8mEN6G8qoTS77iXxXX4Hvu4Qj540PZDvQ8V6DmX6iXo98k7Md0Cm1PrLaA== + version "4.2.2" + resolved "https://registry.yarnpkg.com/less/-/less-4.2.2.tgz#4b59ede113933b58ab152190edf9180fc36846d8" + integrity sha512-tkuLHQlvWUTeQ3doAqnHbNn8T6WX1KA8yvbKG9x4VtKtIjHsVKQZCH11zRgAfbDAXC2UNIg/K9BYAAcEzUIrNg== dependencies: copy-anything "^2.0.1" parse-node-version "^1.0.1" @@ -6505,9 +6207,9 @@ lilconfig@^2.0.5: integrity sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ== lilconfig@^3.1.1: - version "3.1.2" - resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-3.1.2.tgz#e4a7c3cb549e3a606c8dcc32e5ae1005e62c05cb" - integrity sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow== + version "3.1.3" + resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-3.1.3.tgz#a1bcfd6257f9585bf5ae14ceeebb7b559025e4c4" + integrity sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw== lines-and-columns@^1.1.6: version "1.2.4" @@ -6636,13 +6338,19 @@ markdown-table@^3.0.0: resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-3.0.4.tgz#fe44d6d410ff9d6f2ea1797a3f60aa4d2b631c2a" integrity sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw== +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity 
sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + mdast-util-directive@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz#3fb1764e705bbdf0afb0d3f889e4404c3e82561f" - integrity sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q== + version "3.1.0" + resolved "https://registry.yarnpkg.com/mdast-util-directive/-/mdast-util-directive-3.1.0.tgz#f3656f4aab6ae3767d3c72cfab5e8055572ccba1" + integrity sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q== dependencies: "@types/mdast" "^4.0.0" "@types/unist" "^3.0.0" + ccount "^2.0.0" devlop "^1.0.0" mdast-util-from-markdown "^2.0.0" mdast-util-to-markdown "^2.0.0" @@ -6651,9 +6359,9 @@ mdast-util-directive@^3.0.0: unist-util-visit-parents "^6.0.0" mdast-util-find-and-replace@^3.0.0, mdast-util-find-and-replace@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz#a6fc7b62f0994e973490e45262e4bc07607b04e0" - integrity sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA== + version "3.0.2" + resolved "https://registry.yarnpkg.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz#70a3174c894e14df722abf43bc250cbae44b11df" + integrity sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg== dependencies: "@types/mdast" "^4.0.0" escape-string-regexp "^5.0.0" @@ -6702,9 +6410,9 @@ mdast-util-gfm-autolink-literal@^2.0.0: micromark-util-character "^2.0.0" mdast-util-gfm-footnote@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz#25a1753c7d16db8bfd53cd84fe50562bd1e6d6a9" - integrity sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ== + 
version "2.1.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz#7778e9d9ca3df7238cc2bd3fa2b1bf6a65b19403" + integrity sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ== dependencies: "@types/mdast" "^4.0.0" devlop "^1.1.0" @@ -6743,9 +6451,9 @@ mdast-util-gfm-task-list-item@^2.0.0: mdast-util-to-markdown "^2.0.0" mdast-util-gfm@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz#3f2aecc879785c3cb6a81ff3a243dc11eca61095" - integrity sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw== + version "3.1.0" + resolved "https://registry.yarnpkg.com/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz#2cdf63b92c2a331406b0fb0db4c077c1b0331751" + integrity sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ== dependencies: mdast-util-from-markdown "^2.0.0" mdast-util-gfm-autolink-literal "^2.0.0" @@ -6768,9 +6476,9 @@ mdast-util-mdx-expression@^2.0.0: mdast-util-to-markdown "^2.0.0" mdast-util-mdx-jsx@^3.0.0: - version "3.1.3" - resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz#76b957b3da18ebcfd0de3a9b4451dcd6fdec2320" - integrity sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ== + version "3.2.0" + resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz#fd04c67a2a7499efb905a8a5c578dddc9fdada0d" + integrity sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q== dependencies: "@types/estree-jsx" "^1.0.0" "@types/hast" "^3.0.0" @@ -6896,9 +6604,9 @@ methods@~1.1.2: integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== micromark-core-commonmark@^2.0.0: - version "2.0.2" - resolved 
"https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-2.0.2.tgz#6a45bbb139e126b3f8b361a10711ccc7c6e15e93" - integrity sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w== + version "2.0.3" + resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz#c691630e485021a68cf28dbc2b2ca27ebf678cd4" + integrity sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg== dependencies: decode-named-character-reference "^1.0.0" devlop "^1.0.0" @@ -6977,9 +6685,9 @@ micromark-extension-gfm-strikethrough@^2.0.0: micromark-util-types "^2.0.0" micromark-extension-gfm-table@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz#5cadedfbb29fca7abf752447967003dc3b6583c9" - integrity sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g== + version "2.1.1" + resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz#fac70bcbf51fe65f5f44033118d39be8a9b5940b" + integrity sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg== dependencies: devlop "^1.0.0" micromark-factory-space "^2.0.0" @@ -7261,9 +6969,9 @@ micromark-util-sanitize-uri@^2.0.0: micromark-util-symbol "^2.0.0" micromark-util-subtokenize@^2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.3.tgz#70ffb99a454bd8c913c8b709c3dc97baefb65f96" - integrity sha512-VXJJuNxYWSoYL6AJ6OQECCFGhIU2GGHMw8tahogePBrjkG8aCCas3ibkp7RnVOSTClg2is05/R7maAhF1XyQMg== + version "2.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz#d8ade5ba0f3197a1cf6a2999fbbfe6357a1a19ee" + integrity 
sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA== dependencies: devlop "^1.0.0" micromark-util-chunked "^2.0.0" @@ -7286,14 +6994,14 @@ micromark-util-types@^1.0.0: integrity sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg== micromark-util-types@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-2.0.1.tgz#a3edfda3022c6c6b55bfb049ef5b75d70af50709" - integrity sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ== + version "2.0.2" + resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-2.0.2.tgz#f00225f5f5a0ebc3254f96c36b6605c4b393908e" + integrity sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA== micromark@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/micromark/-/micromark-4.0.1.tgz#294c2f12364759e5f9e925a767ae3dfde72223ff" - integrity sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw== + version "4.0.2" + resolved "https://registry.yarnpkg.com/micromark/-/micromark-4.0.2.tgz#91395a3e1884a198e62116e33c9c568e39936fdb" + integrity sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA== dependencies: "@types/debug" "^4.0.0" debug "^4.0.0" @@ -7313,7 +7021,7 @@ micromark@^4.0.0: micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" -micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5: +micromatch@^4.0.2, micromatch@^4.0.5, micromatch@^4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== @@ -7396,9 +7104,9 @@ minimist@^1.2.0, minimist@^1.2.6: integrity 
sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== mrmime@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/mrmime/-/mrmime-2.0.0.tgz#151082a6e06e59a9a39b46b3e14d5cfe92b3abb4" - integrity sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/mrmime/-/mrmime-2.0.1.tgz#bc3e87f7987853a54c9850eeb1f1078cd44adddc" + integrity sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ== ms@2.0.0: version "2.0.0" @@ -7418,10 +7126,10 @@ multicast-dns@^7.2.5: dns-packet "^5.2.2" thunky "^1.0.2" -nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== +nanoid@^3.3.8: + version "3.3.9" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.9.tgz#e0097d8e026b3343ff053e9ccd407360a03f503a" + integrity sha512-SppoicMGpZvbF1l3z4x7No3OlIjP7QJvC9XR7AhZr1kL133KHnKPztkKDc+Ir4aJ/1VhTySrtKhrsycmrMQfvg== needle@^3.1.0: version "3.3.1" @@ -7460,9 +7168,9 @@ node-addon-api@^7.0.0: integrity sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ== node-emoji@^2.1.0: - version "2.1.3" - resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-2.1.3.tgz#93cfabb5cc7c3653aa52f29d6ffb7927d8047c06" - integrity sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA== + version "2.2.0" + resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-2.2.0.tgz#1d000e3c76e462577895be1b436f4aa2d6760eb0" + integrity sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw== dependencies: "@sindresorhus/is" "^4.6.0" char-regex "^1.0.2" @@ -7474,11 +7182,6 @@ node-forge@^1: resolved 
"https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.1.tgz#be8da2af243b2417d5f646a770663a92b7e9ded3" integrity sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA== -node-releases@^2.0.18: - version "2.0.18" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f" - integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g== - node-releases@^2.0.19: version "2.0.19" resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.19.tgz#9e445a52950951ec4d177d843af370b411caf314" @@ -7531,10 +7234,10 @@ object-assign@^4.1.1: resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== -object-inspect@^1.13.1: - version "1.13.3" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.3.tgz#f14c183de51130243d6d18ae149375ff50ea488a" - integrity sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA== +object-inspect@^1.13.3: + version "1.13.4" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.4.tgz#8375265e21bc20d0fa582c22e1b13485d6e00213" + integrity sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew== object-keys@^1.1.1: version "1.1.1" @@ -7542,13 +7245,15 @@ object-keys@^1.1.1: integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== object.assign@^4.1.0: - version "4.1.5" - resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0" - integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ== + version "4.1.7" + resolved 
"https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.7.tgz#8c14ca1a424c6a561b0bb2a22f66f5049a945d3d" + integrity sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw== dependencies: - call-bind "^1.0.5" + call-bind "^1.0.8" + call-bound "^1.0.3" define-properties "^1.2.1" - has-symbols "^1.0.3" + es-object-atoms "^1.0.0" + has-symbols "^1.1.0" object-keys "^1.1.1" obuf@^1.0.0, obuf@^1.1.2: @@ -7689,12 +7394,11 @@ parent-module@^1.0.0: callsites "^3.0.0" parse-entities@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-4.0.1.tgz#4e2a01111fb1c986549b944af39eeda258fc9e4e" - integrity sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w== + version "4.0.2" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-4.0.2.tgz#61d46f5ed28e4ee62e9ddc43d6b010188443f159" + integrity sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw== dependencies: "@types/unist" "^2.0.0" - character-entities "^2.0.0" character-entities-legacy "^3.0.0" character-reference-invalid "^2.0.0" decode-named-character-reference "^1.0.0" @@ -7785,10 +7489,10 @@ path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-to-regexp@0.1.10: - version "0.1.10" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.10.tgz#67e9108c5c0551b9e5326064387de4763c4d5f8b" - integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w== +path-to-regexp@0.1.12: + version "0.1.12" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.12.tgz#d5e1a12e478a976d432ef3c58d534b9923164bb7" + integrity 
sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ== path-to-regexp@3.3.0: version "3.3.0" @@ -7807,7 +7511,7 @@ path-type@^4.0.0: resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== -picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0, picocolors@^1.1.1: +picocolors@^1.0.0, picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -8108,9 +7812,9 @@ postcss-modules-extract-imports@^3.0.0, postcss-modules-extract-imports@^3.1.0: integrity sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q== postcss-modules-local-by-default@^4.0.4, postcss-modules-local-by-default@^4.0.5: - version "4.1.0" - resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.1.0.tgz#b0db6bc81ffc7bdc52eb0f84d6ca0bedf0e36d21" - integrity sha512-rm0bdSv4jC3BDma3s9H19ZddW0aHX6EoqwDYU2IfZhRN+53QrufTRo2IdkAbRqLx4R2IYbZnbjKKxg4VN5oU9Q== + version "4.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.2.0.tgz#d150f43837831dae25e4085596e84f6f5d6ec368" + integrity sha512-5kcJm/zk+GJDSfw+V/42fJ5fhjL5YbFDl8nVdXkJPLLW+Vf9mTD5Xe0wqIaDnLuL2U6cDNpTr+UQ+v2HWIBhzw== dependencies: icss-utils "^5.0.0" postcss-selector-parser "^7.0.0" @@ -8352,9 +8056,9 @@ postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.16: util-deprecate "^1.0.2" postcss-selector-parser@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz#41bd8b56f177c093ca49435f65731befe25d6b9c" - integrity 
sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ== + version "7.1.0" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz#4d6af97eba65d73bc4d84bcb343e865d7dd16262" + integrity sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA== dependencies: cssesc "^3.0.0" util-deprecate "^1.0.2" @@ -8392,11 +8096,11 @@ postcss-zindex@^6.0.2: integrity sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg== postcss@^8.0.0, postcss@^8.4.21, postcss@^8.4.24, postcss@^8.4.26, postcss@^8.4.33, postcss@^8.4.35, postcss@^8.4.38: - version "8.4.49" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.49.tgz#4ea479048ab059ab3ae61d082190fabfd994fe19" - integrity sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA== + version "8.5.3" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.3.tgz#1463b6f1c7fb16fe258736cba29a2de35237eafb" + integrity sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A== dependencies: - nanoid "^3.3.7" + nanoid "^3.3.8" picocolors "^1.1.1" source-map-js "^1.2.1" @@ -8414,17 +8118,17 @@ pretty-time@^1.1.0: integrity sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA== prism-react-renderer@^2.3.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/prism-react-renderer/-/prism-react-renderer-2.4.0.tgz#c5ea692029c2f8b3fd04f63662d04ffd4eaf10a0" - integrity sha512-327BsVCD/unU4CNLZTWVHyUHKnsqcvj2qbPlQ8MiBE2eq2rgctjigPA1Gp9HLF83kZ20zNN6jgizHJeEsyFYOw== + version "2.4.1" + resolved "https://registry.yarnpkg.com/prism-react-renderer/-/prism-react-renderer-2.4.1.tgz#ac63b7f78e56c8f2b5e76e823a976d5ede77e35f" + integrity sha512-ey8Ls/+Di31eqzUxC46h8MksNuGx/n0AAC8uKpwFau4RPDYLuE3EXTp8N8G2vX2N7UC/+IXeNUnlWBGGcAG+Ig== dependencies: "@types/prismjs" 
"^1.26.0" clsx "^2.0.0" prismjs@^1.29.0: - version "1.29.0" - resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.29.0.tgz#f113555a8fa9b57c35e637bba27509dcf802dd12" - integrity sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q== + version "1.30.0" + resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.30.0.tgz#d9709969d9d4e16403f6f348c63553b19f0975a9" + integrity sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw== process-nextick-args@~2.0.0: version "2.0.1" @@ -8453,6 +8157,11 @@ property-information@^6.0.0: resolved "https://registry.yarnpkg.com/property-information/-/property-information-6.5.0.tgz#6212fbb52ba757e92ef4fb9d657563b933b7ffec" integrity sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig== +property-information@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/property-information/-/property-information-7.0.0.tgz#3508a6d6b0b8eb3ca6eb2c6623b164d2ed2ab112" + integrity sha512-7D/qOz/+Y4X/rzSB6jKxKUsQnphO046ei8qxG59mtM3RG3DHgTK81HrxrmoDVINJb8NKT5ZsRbwHvQ6B68Iyhg== + proto-list@~1.2.1: version "1.2.4" resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" @@ -8583,9 +8292,9 @@ react-dom@^18.0.0: scheduler "^0.23.2" react-error-overlay@^6.0.11: - version "6.0.11" - resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.11.tgz#92835de5841c5cf08ba00ddd2d677b6d17ff9adb" - integrity sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg== + version "6.1.0" + resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.1.0.tgz#22b86256beb1c5856f08a9a228adb8121dd985f2" + integrity sha512-SN/U6Ytxf1QGkw/9ve5Y+NxBbZM6Ht95tuXNMKs8EJyFa/Vy/+Co3stop3KBHARfn/giv+Lj1uUnTfOJ3moFEQ== react-fast-compare@^3.2.0, react-fast-compare@^3.2.2: version "3.2.2" @@ -8628,10 
+8337,10 @@ react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== -react-is@^18.3.1: - version "18.3.1" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.3.1.tgz#e83557dc12eae63a99e003a46388b1dcbb44db7e" - integrity sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg== +react-is@^19.0.0: + version "19.0.0" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-19.0.0.tgz#d6669fd389ff022a9684f708cf6fa4962d1fea7a" + integrity sha512-H91OHcwjZsbq3ClIDHMzBShc1rotbfACdWENsmEf0IFvZ3FgGPtdHMcsv45bQ1hAbgdfiA8SnxTKfDS+x/8m2g== react-json-view-lite@^1.2.0: version "1.5.0" @@ -8727,9 +8436,9 @@ readable-stream@^3.0.6: util-deprecate "^1.0.1" readdirp@^4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.2.tgz#388fccb8b75665da3abffe2d8f8ed59fe74c230a" - integrity sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA== + version "4.1.2" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.1.2.tgz#eb85801435fbf2a7ee58f19e0921b068fc69948d" + integrity sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg== readdirp@~3.6.0: version "3.6.0" @@ -8821,7 +8530,7 @@ regenerator-transform@^0.15.2: dependencies: "@babel/runtime" "^7.8.4" -regexpu-core@^6.1.1: +regexpu-core@^6.2.0: version "6.2.0" resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.2.0.tgz#0e5190d79e542bf294955dccabae04d3c7d53826" integrity sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA== @@ -8834,9 +8543,9 @@ regexpu-core@^6.1.1: unicode-match-property-value-ecmascript "^2.1.0" registry-auth-token@^5.0.1: - version "5.0.2" - resolved 
"https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.0.2.tgz#8b026cc507c8552ebbe06724136267e63302f756" - integrity sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ== + version "5.1.0" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.1.0.tgz#3c659047ecd4caebd25bc1570a3aa979ae490eca" + integrity sha512-GdekYuwLXLxMuFTwAPg5UKGLW/UXzQrZvH/Zj791BQif5T05T0RsaLfHc9q3ZOKi7n+BoprPD9mJ0O0k4xzUlw== dependencies: "@pnpm/npm-conf" "^2.1.0" @@ -8883,9 +8592,9 @@ relateurl@^0.2.7: integrity sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog== remark-directive@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/remark-directive/-/remark-directive-3.0.0.tgz#34452d951b37e6207d2e2a4f830dc33442923268" - integrity sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA== + version "3.0.1" + resolved "https://registry.yarnpkg.com/remark-directive/-/remark-directive-3.0.1.tgz#689ba332f156cfe1118e849164cc81f157a3ef0a" + integrity sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A== dependencies: "@types/mdast" "^4.0.0" mdast-util-directive "^3.0.0" @@ -8914,9 +8623,9 @@ remark-frontmatter@^5.0.0: unified "^11.0.0" remark-gfm@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/remark-gfm/-/remark-gfm-4.0.0.tgz#aea777f0744701aa288b67d28c43565c7e8c35de" - integrity sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA== + version "4.0.1" + resolved "https://registry.yarnpkg.com/remark-gfm/-/remark-gfm-4.0.1.tgz#33227b2a74397670d357bf05c098eaf8513f0d6b" + integrity sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg== dependencies: "@types/mdast" "^4.0.0" mdast-util-gfm "^3.0.0" @@ -9025,11 +8734,11 @@ resolve-pathname@^3.0.0: integrity 
sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng== resolve@^1.1.6, resolve@^1.14.2, resolve@^1.19.0: - version "1.22.8" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" - integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== + version "1.22.10" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.10.tgz#b663e83ffb09bbf2386944736baae803029b8b39" + integrity sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w== dependencies: - is-core-module "^2.13.0" + is-core-module "^2.16.0" path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" @@ -9046,9 +8755,9 @@ retry@^0.13.1: integrity sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg== reusify@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" - integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + version "1.1.0" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.1.0.tgz#0fe13b9522e1473f51b558ee796e08f11f9b489f" + integrity sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw== rimraf@^3.0.2: version "3.0.2" @@ -9094,28 +8803,17 @@ safe-buffer@~5.1.0, safe-buffer@~5.1.1: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== -sass-loader@^10.1.1: - version "10.5.2" - resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-10.5.2.tgz#1ca30534fff296417b853c7597ca3b0bbe8c37d0" - integrity sha512-vMUoSNOUKJILHpcNCCyD23X34gve1TS7Rjd9uXHeKqhvBG39x6XbswFDtpbTElj6XdMFezoWhkh5vtKudf2cgQ== - dependencies: - 
klona "^2.0.4" - loader-utils "^2.0.0" - neo-async "^2.6.2" - schema-utils "^3.0.0" - semver "^7.3.2" - sass-loader@^16.0.2: - version "16.0.3" - resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-16.0.3.tgz#17b944fab6702dc7a52c5d2a88cbfa38c39cdc75" - integrity sha512-gosNorT1RCkuCMyihv6FBRR7BMV06oKRAs+l4UMp1mlcVg9rWN6KMmUj3igjQwmYys4mDP3etEYJgiHRbgHCHA== + version "16.0.5" + resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-16.0.5.tgz#257bc90119ade066851cafe7f2c3f3504c7cda98" + integrity sha512-oL+CMBXrj6BZ/zOq4os+UECPL+bWqt6OAC6DWS8Ln8GZRcMDjlJ4JC3FBDuHJdYaFWIdKNIBYmtZtK2MaMkNIw== dependencies: neo-async "^2.6.2" sass@^1.70.0, sass@^1.79.3: - version "1.81.0" - resolved "https://registry.yarnpkg.com/sass/-/sass-1.81.0.tgz#a9010c0599867909dfdbad057e4a6fbdd5eec941" - integrity sha512-Q4fOxRfhmv3sqCLoGfvrC9pRV8btc0UtqL9mN6Yrv6Qi9ScL55CVH1vlPP863ISLEEMNLLuu9P+enCeGHlnzhA== + version "1.85.1" + resolved "https://registry.yarnpkg.com/sass/-/sass-1.85.1.tgz#18ab0bb48110ae99163778f06445b406148ca0d5" + integrity sha512-Uk8WpxM5v+0cMR0XjX9KfRIacmSG86RH4DCCZjLU2rFh5tyutt9siAXJ7G+YfxQ99Q6wrRMbMlVl6KqUms71ag== dependencies: chokidar "^4.0.0" immutable "^5.0.2" @@ -9149,7 +8847,7 @@ schema-utils@2.7.0: ajv "^6.12.2" ajv-keywords "^3.4.1" -schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0: +schema-utils@^3.0.0: version "3.3.0" resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-3.3.0.tgz#f50a88877c3c01652a15b622ae9e9795df7a60fe" integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg== @@ -9158,17 +8856,7 @@ schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0: ajv "^6.12.5" ajv-keywords "^3.5.2" -schema-utils@^4.0.0, schema-utils@^4.0.1: - version "4.2.0" - resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.2.0.tgz#70d7c93e153a273a805801882ebd3bff20d89c8b" - integrity 
sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw== - dependencies: - "@types/json-schema" "^7.0.9" - ajv "^8.9.0" - ajv-formats "^2.1.1" - ajv-keywords "^5.1.0" - -schema-utils@^4.3.0: +schema-utils@^4.0.0, schema-utils@^4.0.1, schema-utils@^4.3.0: version "4.3.0" resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.3.0.tgz#3b669f04f71ff2dfb5aba7ce2d5a9d79b35622c0" integrity sha512-Gf9qqc58SpCA/xdziiHz35F4GNIWYWZrEshUc/G/r5BnLph6xpKuLeoJoQuj5WfBIx/eQLf+hmVPYHaxJu7V2g== @@ -9217,9 +8905,9 @@ semver@^6.3.1: integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== semver@^7.3.2, semver@^7.3.5, semver@^7.3.7, semver@^7.5.4: - version "7.6.3" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" - integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + version "7.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.1.tgz#abd5098d82b18c6c81f6074ff2647fd3e7220c9f" + integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA== send@0.19.0: version "0.19.0" @@ -9283,7 +8971,7 @@ serve-static@1.16.2: parseurl "~1.3.3" send "0.19.0" -set-function-length@^1.2.1: +set-function-length@^1.2.2: version "1.2.2" resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== @@ -9330,9 +9018,9 @@ shebang-regex@^3.0.0: integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== shell-quote@^1.7.3, shell-quote@^1.8.1: - version "1.8.1" - resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.1.tgz#6dbf4db75515ad5bac63b4f1894c3a154c766680" - integrity 
sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== + version "1.8.2" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.2.tgz#d2d83e057959d53ec261311e9e9b8f51dcb2934a" + integrity sha512-AzqKpGKjrj7EM6rKVQEPpB288oCfnrEIuyoT9cyF4nmGa7V8Zk6f7RRqYisX8X9m+Q7bd632aZW4ky7EhbQztA== shelljs@^0.8.5: version "0.8.5" @@ -9343,15 +9031,45 @@ shelljs@^0.8.5: interpret "^1.0.0" rechoir "^0.6.2" +side-channel-list@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/side-channel-list/-/side-channel-list-1.0.0.tgz#10cb5984263115d3b7a0e336591e290a830af8ad" + integrity sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA== + dependencies: + es-errors "^1.3.0" + object-inspect "^1.13.3" + +side-channel-map@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/side-channel-map/-/side-channel-map-1.0.1.tgz#d6bb6b37902c6fef5174e5f533fab4c732a26f42" + integrity sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA== + dependencies: + call-bound "^1.0.2" + es-errors "^1.3.0" + get-intrinsic "^1.2.5" + object-inspect "^1.13.3" + +side-channel-weakmap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz#11dda19d5368e40ce9ec2bdc1fb0ecbc0790ecea" + integrity sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A== + dependencies: + call-bound "^1.0.2" + es-errors "^1.3.0" + get-intrinsic "^1.2.5" + object-inspect "^1.13.3" + side-channel-map "^1.0.1" + side-channel@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" - integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/side-channel/-/side-channel-1.1.0.tgz#c3fcff9c4da932784873335ec9765fa94ff66bc9" + integrity sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw== dependencies: - call-bind "^1.0.7" es-errors "^1.3.0" - get-intrinsic "^1.2.4" - object-inspect "^1.13.1" + object-inspect "^1.13.3" + side-channel-list "^1.0.0" + side-channel-map "^1.0.1" + side-channel-weakmap "^1.0.2" signal-exit@^3.0.2, signal-exit@^3.0.3: version "3.0.7" @@ -9497,12 +9215,7 @@ statuses@2.0.1: resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== -std-env@^3.0.1: - version "3.8.0" - resolved "https://registry.yarnpkg.com/std-env/-/std-env-3.8.0.tgz#b56ffc1baf1a29dcc80a3bdf11d7fca7c315e7d5" - integrity sha512-Bc3YwwCB+OzldMxOXJIIvC6cPRWr/LxOp48CdQTOkPyk/t4JWWJbrilwBd7RJzKV8QW7tJkcgAmeuLLJugl5/w== - -std-env@^3.7.0: +std-env@^3.0.1, std-env@^3.7.0: version "3.8.1" resolved "https://registry.yarnpkg.com/std-env/-/std-env-3.8.1.tgz#2b81c631c62e3d0b964b87f099b8dcab6c9a5346" integrity sha512-vj5lIj3Mwf9D79hBkltk5qmkFI+biIKWS2IBxEyEU3AX1tUf7AoL8nSazCOiiqQsGKIq01SClsKEzweu34uwvA== @@ -9595,14 +9308,14 @@ strip-json-comments@~2.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== -style-to-object@^0.4.0: - version "0.4.4" - resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-0.4.4.tgz#266e3dfd56391a7eefb7770423612d043c3f33ec" - integrity sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg== +style-to-js@^1.0.0: + version "1.1.16" + resolved 
"https://registry.yarnpkg.com/style-to-js/-/style-to-js-1.1.16.tgz#e6bd6cd29e250bcf8fa5e6591d07ced7575dbe7a" + integrity sha512-/Q6ld50hKYPH3d/r6nr117TZkHR0w0kGGIVfpG9N6D8NymRPM9RqCUv4pRpJ62E5DqOYx2AFpbZMyCPnjQCnOw== dependencies: - inline-style-parser "0.1.1" + style-to-object "1.0.8" -style-to-object@^1.0.0: +style-to-object@1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-1.0.8.tgz#67a29bca47eaa587db18118d68f9d95955e81292" integrity sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g== @@ -9680,18 +9393,7 @@ tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0, tapable@^2.2.1: resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.2.1.tgz#1967a73ef4060a82f12ab96af86d52fdb76eeca0" integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ== -terser-webpack-plugin@^5.3.10, terser-webpack-plugin@^5.3.9: - version "5.3.10" - resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz#904f4c9193c6fd2a03f693a2150c62a92f40d199" - integrity sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w== - dependencies: - "@jridgewell/trace-mapping" "^0.3.20" - jest-worker "^27.4.5" - schema-utils "^3.1.1" - serialize-javascript "^6.0.1" - terser "^5.26.0" - -terser-webpack-plugin@^5.3.11: +terser-webpack-plugin@^5.3.11, terser-webpack-plugin@^5.3.9: version "5.3.14" resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz#9031d48e57ab27567f02ace85c7d690db66c3e06" integrity sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw== @@ -9702,17 +9404,7 @@ terser-webpack-plugin@^5.3.11: serialize-javascript "^6.0.2" terser "^5.31.1" -terser@^5.10.0, terser@^5.15.1, terser@^5.26.0: - version "5.36.0" - resolved 
"https://registry.yarnpkg.com/terser/-/terser-5.36.0.tgz#8b0dbed459ac40ff7b4c9fd5a3a2029de105180e" - integrity sha512-IYV9eNMuFAV4THUspIRXkLakHnV6XO7FEdtKjf/mDyrnqUg9LnlOn6/RwRvM9SZjR4GUq8Nk8zj67FzVARr74w== - dependencies: - "@jridgewell/source-map" "^0.3.3" - acorn "^8.8.2" - commander "^2.20.0" - source-map-support "~0.5.20" - -terser@^5.31.1: +terser@^5.10.0, terser@^5.15.1, terser@^5.31.1: version "5.39.0" resolved "https://registry.yarnpkg.com/terser/-/terser-5.39.0.tgz#0e82033ed57b3ddf1f96708d123cca717d86ca3a" integrity sha512-LBAhFyLho16harJoWMg/nZsQYgTrg5jXOn2nCYjRUcZZEdE3qa2zb8QEDRUGVZBW4rlazf2fxkg8tztybTaqWw== @@ -9840,10 +9532,10 @@ typescript@~5.5.2: resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.5.4.tgz#d9852d6c82bad2d2eda4fd74a5762a8f5909e9ba" integrity sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q== -undici-types@~6.19.8: - version "6.19.8" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" - integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== +undici-types@~6.20.0: + version "6.20.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.20.0.tgz#8171bf22c1f588d1554d55bf204bc624af388433" + integrity sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg== unicode-canonical-property-names-ecmascript@^2.0.0: version "2.0.1" @@ -9949,12 +9641,12 @@ unpipe@1.0.0, unpipe@~1.0.0: integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== update-browserslist-db@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz#80846fba1d79e82547fb661f8d141e0945755fe5" - integrity sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A== + version "1.1.3" + resolved 
"https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz#348377dd245216f9e7060ff50b15a1b740b75420" + integrity sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw== dependencies: escalade "^3.2.0" - picocolors "^1.1.0" + picocolors "^1.1.1" update-notifier@^6.0.2: version "6.0.2" @@ -10159,36 +9851,7 @@ webpack-sources@^3.2.3: resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde" integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== -webpack@^5, webpack@^5.88.1: - version "5.96.1" - resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.96.1.tgz#3676d1626d8312b6b10d0c18cc049fba7ac01f0c" - integrity sha512-l2LlBSvVZGhL4ZrPwyr8+37AunkcYj5qh8o6u2/2rzoPc8gxFJkLj1WxNgooi9pnoc06jh0BjuXnamM4qlujZA== - dependencies: - "@types/eslint-scope" "^3.7.7" - "@types/estree" "^1.0.6" - "@webassemblyjs/ast" "^1.12.1" - "@webassemblyjs/wasm-edit" "^1.12.1" - "@webassemblyjs/wasm-parser" "^1.12.1" - acorn "^8.14.0" - browserslist "^4.24.0" - chrome-trace-event "^1.0.2" - enhanced-resolve "^5.17.1" - es-module-lexer "^1.2.1" - eslint-scope "5.1.1" - events "^3.2.0" - glob-to-regexp "^0.4.1" - graceful-fs "^4.2.11" - json-parse-even-better-errors "^2.3.1" - loader-runner "^4.2.0" - mime-types "^2.1.27" - neo-async "^2.6.2" - schema-utils "^3.2.0" - tapable "^2.1.1" - terser-webpack-plugin "^5.3.10" - watchpack "^2.4.1" - webpack-sources "^3.2.3" - -webpack@^5.95.0: +webpack@^5, webpack@^5.88.1, webpack@^5.95.0: version "5.98.0" resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.98.0.tgz#44ae19a8f2ba97537978246072fb89d10d1fbd17" integrity sha512-UFynvx+gM44Gv9qFgj0acCQK2VE1CtdfwFdimkapco3hlPCJ/zeq73n2yVKimVbtm+TnApIugGhLJnkU6gjYXA== @@ -10320,9 +9983,9 @@ ws@^7.3.1: integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ== ws@^8.13.0: - 
version "8.18.0" - resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" - integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + version "8.18.1" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.1.tgz#ea131d3784e1dfdff91adb0a4a116b127515e3cb" + integrity sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w== xdg-basedir@^5.0.1, xdg-basedir@^5.1.0: version "5.1.0" @@ -10352,9 +10015,9 @@ yocto-queue@^0.1.0: integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== yocto-queue@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.1.1.tgz#fef65ce3ac9f8a32ceac5a634f74e17e5b232110" - integrity sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g== + version "1.2.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.2.0.tgz#4a29a93e7591328fa31768701e6ea66962401f79" + integrity sha512-KHBC7z61OJeaMGnF3wqNZj+GGNXOyypZviiKpQeiHirG5Ib1ImwcLBH70rbMSkKfSmUNBsdf2PwaEJtKvgmkNw== zwitch@^2.0.0: version "2.0.4"