diff --git a/CHANGELOG.md b/CHANGELOG.md index ae6f6ce71d6..70b13589e3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#4734](https://github.com/open-telemetry/opentelemetry-python/pull/4734)) - build: bump ruff to 0.14.1 ([#4782](https://github.com/open-telemetry/opentelemetry-python/pull/4782)) +- semantic-conventions: Bump to 1.38.0 + ([#4791](https://github.com/open-telemetry/opentelemetry-python/pull/4791)) ## Version 1.38.0/0.59b0 (2025-10-16) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py index 73456b671ea..6500e67802b 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py @@ -67,6 +67,18 @@ The y (vertical) component of a screen coordinate, in screen pixels. """ +APP_SCREEN_ID: Final = "app.screen.id" +""" +An identifier that uniquely differentiates this screen from other screens in the same application. +Note: A screen represents only the part of the device display drawn by the app. It typically contains multiple widgets or UI components and is larger in scope than individual widgets. Multiple screens can coexist on the same display simultaneously (e.g., split view on tablets). +""" + +APP_SCREEN_NAME: Final = "app.screen.name" +""" +The name of an application screen. +Note: A screen represents only the part of the device display drawn by the app. It typically contains multiple widgets or UI components and is larger in scope than individual widgets. Multiple screens can coexist on the same display simultaneously (e.g., split view on tablets). 
+""" + APP_WIDGET_ID: Final = "app.widget.id" """ An identifier that uniquely differentiates this widget from other widgets in the same application. diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py index 9db27f62989..d0ec046aba7 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/azure_attributes.py @@ -35,7 +35,7 @@ ) """ List of regions contacted during operation in the order that they were contacted. If there is more than one region listed, it indicates that the operation was performed on multiple regions i.e. cross-regional call. -Note: Region name matches the format of `displayName` in [Azure Location API](https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location). +Note: Region name matches the format of `displayName` in [Azure Location API](https://learn.microsoft.com/rest/api/resources/subscriptions/list-locations). """ AZURE_COSMOSDB_OPERATION_REQUEST_CHARGE: Final = ( diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py index ef04de504c0..da45fe37ef2 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/container_attributes.py @@ -58,7 +58,7 @@ CONTAINER_IMAGE_ID: Final = "container.image.id" """ Runtime specific image identifier. Usually a hash algorithm followed by a UUID. 
-Note: Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) endpoint. +Note: Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Container/operation/ContainerInspect) endpoint. K8s defines a link to the container registry repository with digest `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. The ID is assigned by the container runtime and can vary in different environments. Consider using `oci.manifest.digest` if it is important to identify the same image in different environments/runtimes. """ @@ -71,12 +71,12 @@ CONTAINER_IMAGE_REPO_DIGESTS: Final = "container.image.repo_digests" """ Repo digests of the container image as provided by the container runtime. -Note: [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) and [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) report those under the `RepoDigests` field. +Note: [Docker](https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect) and [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) report those under the `RepoDigests` field. """ CONTAINER_IMAGE_TAGS: Final = "container.image.tags" """ -Container image tags. An example can be found in [Docker Image Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). Should be only the `` section of the full name for example from `registry.example.com/my-org/my-image:`. +Container image tags. 
An example can be found in [Docker Image Inspect](https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect). Should be only the `` section of the full name for example from `registry.example.com/my-org/my-image:`. """ CONTAINER_LABEL_TEMPLATE: Final = "container.label" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py index 8c40189eff6..e6d5335795f 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/db_attributes.py @@ -128,7 +128,7 @@ DB_COSMOSDB_STATUS_CODE: Final = "db.cosmosdb.status_code" """ -Deprecated: Replaced by `db.response.status_code`. +Deprecated: Use `db.response.status_code` instead. """ DB_COSMOSDB_SUB_STATUS_CODE: Final = "db.cosmosdb.sub_status_code" @@ -215,6 +215,9 @@ `db.query.parameter.` SHOULD match up with the parameterized placeholders present in `db.query.text`. +It is RECOMMENDED to capture the value as provided by the application +without attempting to do any case normalization. + `db.query.parameter.` SHOULD NOT be captured on batch operations. Examples: @@ -222,8 +225,8 @@ - For a query `SELECT * FROM users where username = %s` with the parameter `"jdoe"`, the attribute `db.query.parameter.0` SHOULD be set to `"jdoe"`. -- For a query `"SELECT * FROM users WHERE username = %(username)s;` with parameter - `username = "jdoe"`, the attribute `db.query.parameter.username` SHOULD be set to `"jdoe"`. +- For a query `"SELECT * FROM users WHERE username = %(userName)s;` with parameter + `userName = "jdoe"`, the attribute `db.query.parameter.userName` SHOULD be set to `"jdoe"`. 
""" DB_QUERY_SUMMARY: Final = "db.query.summary" @@ -238,7 +241,7 @@ DB_REDIS_DATABASE_INDEX: Final = "db.redis.database_index" """ -Deprecated: Replaced by `db.namespace`. +Deprecated: Uncategorized. """ DB_RESPONSE_RETURNED_ROWS: Final = "db.response.returned_rows" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/dns_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/dns_attributes.py index ca162d42e3b..bcb744cb968 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/dns_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/dns_attributes.py @@ -22,5 +22,5 @@ DNS_QUESTION_NAME: Final = "dns.question.name" """ The name being queried. -Note: If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \\t, \\r, and \\n respectively. +Note: The name represents the queried domain name as it appears in the DNS query without any additional normalization. """ diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py index aec9804d7f6..6813a00a700 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/enduser_attributes.py @@ -34,7 +34,7 @@ ENDUSER_ROLE: Final = "enduser.role" """ -Deprecated: Replaced by `user.roles`. +Deprecated: Use `user.roles` instead. 
""" ENDUSER_SCOPE: Final = "enduser.scope" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/event_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/event_attributes.py index 7fa5cf490ce..8db935d76b0 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/event_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/event_attributes.py @@ -16,5 +16,5 @@ EVENT_NAME: Final = "event.name" """ -Deprecated: Replaced by EventName top-level field on the LogRecord. +Deprecated: The value of this attribute MUST now be set as the value of the EventName field on the LogRecord to indicate that the LogRecord represents an Event. """ diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/faas_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/faas_attributes.py index 7ba2267fa4a..98ab0a49344 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/faas_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/faas_attributes.py @@ -121,7 +121,7 @@ - **Google Cloud Run (Services):** The [revision](https://cloud.google.com/run/docs/managing/revisions) (i.e., the function name plus the revision suffix). - **Google Cloud Functions:** The value of the - [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + [`K_REVISION` environment variable](https://cloud.google.com/run/docs/container-contract#services-env-vars). - **Azure Functions:** Not applicable. Do not set this attribute. 
""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gcp_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gcp_attributes.py index 4a44d97190d..c84a16b936d 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gcp_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gcp_attributes.py @@ -72,6 +72,67 @@ The name of the workload as configured in AppHub. """ +GCP_APPHUB_DESTINATION_APPLICATION_CONTAINER: Final = ( + "gcp.apphub_destination.application.container" +) +""" +The container within GCP where the AppHub destination application is defined. +""" + +GCP_APPHUB_DESTINATION_APPLICATION_ID: Final = ( + "gcp.apphub_destination.application.id" +) +""" +The name of the destination application as configured in AppHub. +""" + +GCP_APPHUB_DESTINATION_APPLICATION_LOCATION: Final = ( + "gcp.apphub_destination.application.location" +) +""" +The GCP zone or region where the destination application is defined. +""" + +GCP_APPHUB_DESTINATION_SERVICE_CRITICALITY_TYPE: Final = ( + "gcp.apphub_destination.service.criticality_type" +) +""" +Criticality of a destination workload indicates its importance to the business as specified in [AppHub type enum](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type). +""" + +GCP_APPHUB_DESTINATION_SERVICE_ENVIRONMENT_TYPE: Final = ( + "gcp.apphub_destination.service.environment_type" +) +""" +Software lifecycle stage of a destination service as defined [AppHub environment type](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1). +""" + +GCP_APPHUB_DESTINATION_SERVICE_ID: Final = "gcp.apphub_destination.service.id" +""" +The name of the destination service as configured in AppHub. 
+""" + +GCP_APPHUB_DESTINATION_WORKLOAD_CRITICALITY_TYPE: Final = ( + "gcp.apphub_destination.workload.criticality_type" +) +""" +Criticality of a destination workload indicates its importance to the business as specified in [AppHub type enum](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type). +""" + +GCP_APPHUB_DESTINATION_WORKLOAD_ENVIRONMENT_TYPE: Final = ( + "gcp.apphub_destination.workload.environment_type" +) +""" +Environment of a destination workload is the stage of a software lifecycle as provided in the [AppHub environment type](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1). +""" + +GCP_APPHUB_DESTINATION_WORKLOAD_ID: Final = ( + "gcp.apphub_destination.workload.id" +) +""" +The name of the destination workload as configured in AppHub. +""" + GCP_CLIENT_SERVICE: Final = "gcp.client.service" """ Identifies the Google Cloud service for which the official client library is intended. @@ -141,3 +202,47 @@ class GcpApphubWorkloadEnvironmentTypeValues(Enum): """Test environment.""" DEVELOPMENT = "DEVELOPMENT" """Development environment.""" + + +class GcpApphubDestinationServiceCriticalityTypeValues(Enum): + MISSION_CRITICAL = "MISSION_CRITICAL" + """Mission critical service.""" + HIGH = "HIGH" + """High impact.""" + MEDIUM = "MEDIUM" + """Medium impact.""" + LOW = "LOW" + """Low impact.""" + + +class GcpApphubDestinationServiceEnvironmentTypeValues(Enum): + PRODUCTION = "PRODUCTION" + """Production environment.""" + STAGING = "STAGING" + """Staging environment.""" + TEST = "TEST" + """Test environment.""" + DEVELOPMENT = "DEVELOPMENT" + """Development environment.""" + + +class GcpApphubDestinationWorkloadCriticalityTypeValues(Enum): + MISSION_CRITICAL = "MISSION_CRITICAL" + """Mission critical service.""" + HIGH = "HIGH" + """High impact.""" + MEDIUM = "MEDIUM" + """Medium impact.""" + LOW = "LOW" + """Low impact.""" + + +class GcpApphubDestinationWorkloadEnvironmentTypeValues(Enum): + PRODUCTION = 
"PRODUCTION" + """Production environment.""" + STAGING = "STAGING" + """Staging environment.""" + TEST = "TEST" + """Test environment.""" + DEVELOPMENT = "DEVELOPMENT" + """Development environment.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py index 0de1d391fbf..ead3c3ffb03 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/gen_ai_attributes.py @@ -48,6 +48,32 @@ Note: Data sources are used by AI agents and RAG applications to store grounding data. A data source may be an external database, object store, document collection, website, or any other storage system used by the GenAI agent or application. The `gen_ai.data_source.id` SHOULD match the identifier used by the GenAI system rather than a name specific to the external storage, such as a database or object store. Semantic conventions referencing `gen_ai.data_source.id` MAY also leverage additional attributes, such as `db.*`, to further identify and describe the data source. """ +GEN_AI_EMBEDDINGS_DIMENSION_COUNT: Final = "gen_ai.embeddings.dimension.count" +""" +The number of dimensions the resulting output embeddings should have. +""" + +GEN_AI_EVALUATION_EXPLANATION: Final = "gen_ai.evaluation.explanation" +""" +A free-form explanation for the assigned score provided by the evaluator. +""" + +GEN_AI_EVALUATION_NAME: Final = "gen_ai.evaluation.name" +""" +The name of the evaluation metric used for the GenAI response. +""" + +GEN_AI_EVALUATION_SCORE_LABEL: Final = "gen_ai.evaluation.score.label" +""" +Human readable label for evaluation. +Note: This attribute provides a human-readable interpretation of the evaluation score produced by an evaluator. 
For example, a score value of 1 could mean "relevant" in one evaluation system and "not relevant" in another, depending on the scoring range and evaluator. The label SHOULD have low cardinality. Possible values depend on the evaluation metric and evaluator used; implementations SHOULD document the possible values. +""" + +GEN_AI_EVALUATION_SCORE_VALUE: Final = "gen_ai.evaluation.score.value" +""" +The evaluation score returned by the evaluator. +""" + GEN_AI_INPUT_MESSAGES: Final = "gen_ai.input.messages" """ The chat history provided to the model as an input. @@ -272,11 +298,47 @@ The type of token being counted. """ +GEN_AI_TOOL_CALL_ARGUMENTS: Final = "gen_ai.tool.call.arguments" +""" +Parameters passed to the tool call. +Note: > [!WARNING] +> This attribute may contain sensitive information. + +It's expected to be an object - in case a serialized string is available +to the instrumentation, the instrumentation SHOULD do the best effort to +deserialize it to an object. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. +""" + GEN_AI_TOOL_CALL_ID: Final = "gen_ai.tool.call.id" """ The tool call identifier. """ +GEN_AI_TOOL_CALL_RESULT: Final = "gen_ai.tool.call.result" +""" +The result returned by the tool call (if any and if execution was successful). +Note: > [!WARNING] +> This attribute may contain sensitive information. + +It's expected to be an object - in case a serialized string is available +to the instrumentation, the instrumentation SHOULD do the best effort to +deserialize it to an object. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. +""" + +GEN_AI_TOOL_DEFINITIONS: Final = "gen_ai.tool.definitions" +""" +The list of source system tool definitions available to the GenAI agent or model. 
+Note: The value of this attribute matches source system tool definition format. + +It's expected to be an array of objects where each object represents a tool definition. In case a serialized string is available +to the instrumentation, the instrumentation SHOULD do the best effort to +deserialize it to an array. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. + +Since this attribute could be large, it's NOT RECOMMENDED to populate +it by default. Instrumentations MAY provide a way to enable +populating this attribute. +""" + GEN_AI_TOOL_DESCRIPTION: Final = "gen_ai.tool.description" """ The tool description. @@ -422,9 +484,9 @@ class GenAiSystemValues(Enum): COHERE = "cohere" """Cohere.""" AZ_AI_INFERENCE = "az.ai.inference" - """Azure AI Inference.""" + """Deprecated: Replaced by `azure.ai.inference`.""" AZ_AI_OPENAI = "az.ai.openai" - """Azure OpenAI.""" + """Deprecated: Replaced by `azure.ai.openai`.""" AZURE_AI_INFERENCE = "azure.ai.inference" """Azure AI Inference.""" AZURE_AI_OPENAI = "azure.ai.openai" @@ -436,7 +498,7 @@ class GenAiSystemValues(Enum): PERPLEXITY = "perplexity" """Perplexity.""" XAI = "xai" - """Deprecated: Replaced by `x_ai`.""" + """xAI.""" DEEPSEEK = "deepseek" """DeepSeek.""" GROQ = "groq" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/http_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/http_attributes.py index e97f5ce507d..13491c0d63a 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/http_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/http_attributes.py @@ -29,7 +29,7 @@ HTTP_FLAVOR: Final = "http.flavor" """ -Deprecated: Replaced by `network.protocol.name`. 
+Deprecated: Split into `network.protocol.name` and `network.protocol.version`. """ HTTP_HOST: Final = "http.host" @@ -160,7 +160,7 @@ class HttpConnectionStateValues(Enum): @deprecated( - "The attribute http.flavor is deprecated - Replaced by `network.protocol.name`" + "The attribute http.flavor is deprecated - Split into `network.protocol.name` and `network.protocol.version`" ) class HttpFlavorValues(Enum): HTTP_1_0 = "1.0" @@ -199,5 +199,7 @@ class HttpRequestMethodValues(Enum): """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.PUT`.""" TRACE = "TRACE" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.TRACE`.""" + QUERY = "QUERY" + """QUERY method.""" OTHER = "_OTHER" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.OTHER`.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py index 557d333d697..d2d609c3d06 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/k8s_attributes.py @@ -366,6 +366,16 @@ The name of the Pod. """ +K8S_POD_STATUS_PHASE: Final = "k8s.pod.status.phase" +""" +The phase for the pod. Corresponds to the `phase` field of the: [K8s PodStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core). +""" + +K8S_POD_STATUS_REASON: Final = "k8s.pod.status.reason" +""" +The reason for the pod state. Corresponds to the `reason` field of the: [K8s PodStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core). 
+""" + K8S_POD_UID: Final = "k8s.pod.uid" """ The UID of the Pod. @@ -421,7 +431,7 @@ K8S_RESOURCEQUOTA_RESOURCE_NAME: Final = "k8s.resourcequota.resource_name" """ The name of the K8s resource a resource quota defines. -Note: The value for this attribute can be either the full `count/[.]` string (e.g., count/deployments.apps, count/pods), or, for certain core Kubernetes resources, just the resource name (e.g., pods, services, configmaps). Both forms are supported by Kubernetes for object count quotas. See [Kubernetes Resource Quotas documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota) for more details. +Note: The value for this attribute can be either the full `count/[.]` string (e.g., count/deployments.apps, count/pods), or, for certain core Kubernetes resources, just the resource name (e.g., pods, services, configmaps). Both forms are supported by Kubernetes for object count quotas. See [Kubernetes Resource Quotas documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#quota-on-object-count) for more details. """ K8S_RESOURCEQUOTA_UID: Final = "k8s.resourcequota.uid" @@ -536,6 +546,32 @@ class K8sNodeConditionTypeValues(Enum): """The network for the node is not correctly configured.""" +class K8sPodStatusPhaseValues(Enum): + PENDING = "Pending" + """The pod has been accepted by the system, but one or more of the containers has not been started. This includes time before being bound to a node, as well as time spent pulling images onto the host.""" + RUNNING = "Running" + """The pod has been bound to a node and all of the containers have been started. 
At least one container is still running or is in the process of being restarted.""" + SUCCEEDED = "Succeeded" + """All containers in the pod have voluntarily terminated with a container exit code of 0, and the system is not going to restart any of these containers.""" + FAILED = "Failed" + """All containers in the pod have terminated, and at least one container has terminated in a failure (exited with a non-zero exit code or was stopped by the system).""" + UNKNOWN = "Unknown" + """For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.""" + + +class K8sPodStatusReasonValues(Enum): + EVICTED = "Evicted" + """The pod is evicted.""" + NODE_AFFINITY = "NodeAffinity" + """The pod is in a status because of its node affinity.""" + NODE_LOST = "NodeLost" + """The reason on a pod when its state cannot be confirmed as kubelet is unresponsive on the node it is (was) running.""" + SHUTDOWN = "Shutdown" + """The node is shutdown.""" + UNEXPECTED_ADMISSION_ERROR = "UnexpectedAdmissionError" + """The pod was rejected admission to the node because of an error during admission that could not be categorized.""" + + class K8sVolumeTypeValues(Enum): PERSISTENT_VOLUME_CLAIM = "persistentVolumeClaim" """A [persistentVolumeClaim](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) volume.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py index 3deff31d15a..8791bc8f237 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/messaging_attributes.py @@ -135,7 +135,7 @@ "messaging.kafka.destination.partition" ) """ -Deprecated: Replaced by 
`messaging.destination.partition.id`. +Deprecated: Record string representation of the partition id in `messaging.destination.partition.id` attribute. """ MESSAGING_KAFKA_MESSAGE_KEY: Final = "messaging.kafka.message.key" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/nfs_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/nfs_attributes.py new file mode 100644 index 00000000000..aed898343c5 --- /dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/nfs_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +NFS_OPERATION_NAME: Final = "nfs.operation.name" +""" +NFSv4+ operation name. +""" + +NFS_SERVER_REPCACHE_STATUS: Final = "nfs.server.repcache.status" +""" +Linux: one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or "nocache" (NFSD_STATS_RC_NOCACHE -- uncacheable). 
+""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/onc_rpc_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/onc_rpc_attributes.py new file mode 100644 index 00000000000..d8dd4dbb0c4 --- /dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/onc_rpc_attributes.py @@ -0,0 +1,35 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +ONC_RPC_PROCEDURE_NAME: Final = "onc_rpc.procedure.name" +""" +ONC/Sun RPC procedure name. +""" + +ONC_RPC_PROCEDURE_NUMBER: Final = "onc_rpc.procedure.number" +""" +ONC/Sun RPC procedure number. +""" + +ONC_RPC_PROGRAM_NAME: Final = "onc_rpc.program.name" +""" +ONC/Sun RPC program name. +""" + +ONC_RPC_VERSION: Final = "onc_rpc.version" +""" +ONC/Sun RPC program version. 
+""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openshift_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openshift_attributes.py new file mode 100644 index 00000000000..4a9afc808b4 --- /dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/openshift_attributes.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +OPENSHIFT_CLUSTERQUOTA_NAME: Final = "openshift.clusterquota.name" +""" +The name of the cluster quota. +""" + +OPENSHIFT_CLUSTERQUOTA_UID: Final = "openshift.clusterquota.uid" +""" +The UID of the cluster quota. +""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/peer_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/peer_attributes.py index eac8e77cb87..73b14232e43 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/peer_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/peer_attributes.py @@ -17,4 +17,8 @@ PEER_SERVICE: Final = "peer.service" """ The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. 
+Note: Examples of `peer.service` that users may specify: + +- A Redis cache of auth tokens as `peer.service="AuthTokenCache"`. +- A gRPC service `rpc.service="io.opentelemetry.AuthService"` may be hosted in both a gateway, `peer.service="ExternalApiService"` and a backend, `peer.service="AuthService"`. """ diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/pprof_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/pprof_attributes.py new file mode 100644 index 00000000000..82d6873a3e4 --- /dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/pprof_attributes.py @@ -0,0 +1,45 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Final + +PPROF_LOCATION_IS_FOLDED: Final = "pprof.location.is_folded" +""" +Provides an indication that multiple symbols map to this location's address, for example due to identical code folding by the linker. In that case the line information represents one of the multiple symbols. This field must be recomputed when the symbolization state of the profile changes. +""" + +PPROF_MAPPING_HAS_FILENAMES: Final = "pprof.mapping.has_filenames" +""" +Indicates that there are filenames related to this mapping. +""" + +PPROF_MAPPING_HAS_FUNCTIONS: Final = "pprof.mapping.has_functions" +""" +Indicates that there are functions related to this mapping. 
+""" + +PPROF_MAPPING_HAS_INLINE_FRAMES: Final = "pprof.mapping.has_inline_frames" +""" +Indicates that there are inline frames related to this mapping. +""" + +PPROF_MAPPING_HAS_LINE_NUMBERS: Final = "pprof.mapping.has_line_numbers" +""" +Indicates that there are line numbers related to this mapping. +""" + +PPROF_PROFILE_COMMENT: Final = "pprof.profile.comment" +""" +Free-form text associated with the profile. This field should not be used to store any machine-readable information, it is only for human-friendly content. +""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/process_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/process_attributes.py index 4472bba7a0f..8212e8c1d4f 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/process_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/process_attributes.py @@ -38,7 +38,7 @@ The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead. SHOULD NOT be collected by default unless there is sanitization that excludes sensitive data. """ -PROCESS_CONTEXT_SWITCH_TYPE: Final = "process.context_switch_type" +PROCESS_CONTEXT_SWITCH_TYPE: Final = "process.context_switch.type" """ Specifies whether the context switches for this data point were voluntary or involuntary. """ @@ -133,7 +133,7 @@ PROCESS_PAGING_FAULT_TYPE: Final = "process.paging.fault_type" """ -The type of page fault for this data point. Type `major` is for major/hard page faults, and `minor` is for minor/soft page faults. +Deprecated: Replaced by `system.paging.fault.type`. 
""" PROCESS_PARENT_PID: Final = "process.parent_pid" @@ -186,6 +186,11 @@ The PID of the process's session leader. This is also the session ID (SID) of the process. """ +PROCESS_STATE: Final = "process.state" +""" +The process state, e.g., [Linux Process State Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES). +""" + PROCESS_TITLE: Final = "process.title" """ Process title (proctitle). @@ -233,8 +238,22 @@ class ProcessCpuStateValues(Enum): """wait.""" +@deprecated( + "The attribute process.paging.fault_type is deprecated - Replaced by `system.paging.fault.type`" +) class ProcessPagingFaultTypeValues(Enum): MAJOR = "major" """major.""" MINOR = "minor" """minor.""" + + +class ProcessStateValues(Enum): + RUNNING = "running" + """running.""" + SLEEPING = "sleeping" + """sleeping.""" + STOPPED = "stopped" + """stopped.""" + DEFUNCT = "defunct" + """defunct.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py index f7ed8cf0b30..7d8ee8aa09a 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/rpc_attributes.py @@ -112,14 +112,12 @@ RPC_METHOD: Final = "rpc.method" """ -The name of the (logical) method being called, must be equal to the $method part in the span name. -Note: This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function.name` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). +This is the logical name of the method from the RPC interface perspective. 
""" RPC_SERVICE: Final = "rpc.service" """ The full (logical) name of the service being called, including its package name, if applicable. -Note: This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side). """ RPC_SYSTEM: Final = "rpc.system" @@ -218,3 +216,7 @@ class RpcSystemValues(Enum): """Apache Dubbo.""" CONNECT_RPC = "connect_rpc" """Connect RPC.""" + ONC_RPC = "onc_rpc" + """[ONC RPC (Sun RPC)](https://datatracker.ietf.org/doc/html/rfc5531).""" + JSONRPC = "jsonrpc" + """JSON-RPC.""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py index 5d011f4313b..e51e2b50139 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/system_attributes.py @@ -19,7 +19,7 @@ SYSTEM_CPU_LOGICAL_NUMBER: Final = "system.cpu.logical_number" """ -Deprecated, use `cpu.logical_number` instead. +Deprecated: Replaced by `cpu.logical_number`. """ SYSTEM_CPU_STATE: Final = "system.cpu.state" @@ -67,6 +67,11 @@ The paging access direction. """ +SYSTEM_PAGING_FAULT_TYPE: Final = "system.paging.fault.type" +""" +The paging fault type. +""" + SYSTEM_PAGING_STATE: Final = "system.paging.state" """ The memory paging state. @@ -74,17 +79,17 @@ SYSTEM_PAGING_TYPE: Final = "system.paging.type" """ -The memory paging type. +Deprecated: Replaced by `system.paging.fault.type`. 
""" SYSTEM_PROCESS_STATUS: Final = "system.process.status" """ -The process state, e.g., [Linux Process State Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES). +Deprecated: Replaced by `process.state`. """ SYSTEM_PROCESSES_STATUS: Final = "system.processes.status" """ -Deprecated: Replaced by `system.process.status`. +Deprecated: Replaced by `process.state`. """ @@ -182,6 +187,13 @@ class SystemPagingDirectionValues(Enum): """out.""" +class SystemPagingFaultTypeValues(Enum): + MAJOR = "major" + """major.""" + MINOR = "minor" + """minor.""" + + class SystemPagingStateValues(Enum): USED = "used" """used.""" @@ -189,6 +201,9 @@ class SystemPagingStateValues(Enum): """free.""" +@deprecated( + "The attribute system.paging.type is deprecated - Replaced by `system.paging.fault.type`" +) class SystemPagingTypeValues(Enum): MAJOR = "major" """major.""" @@ -196,6 +211,9 @@ class SystemPagingTypeValues(Enum): """minor.""" +@deprecated( + "The attribute system.process.status is deprecated - Replaced by `process.state`" +) class SystemProcessStatusValues(Enum): RUNNING = "running" """running.""" @@ -208,7 +226,7 @@ class SystemProcessStatusValues(Enum): @deprecated( - "The attribute system.processes.status is deprecated - Replaced by `system.process.status`" + "The attribute system.processes.status is deprecated - Replaced by `process.state`" ) class SystemProcessesStatusValues(Enum): RUNNING = "running" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/thread_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/thread_attributes.py index a7b4ce82871..342c4725606 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/thread_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/thread_attributes.py @@ -17,9 +17,28 @@ THREAD_ID: Final = "thread.id" """ Current "managed" 
thread ID (as opposed to OS thread ID). +Note: Examples of where the value can be extracted from: + +| Language or platform | Source | +| --- | --- | +| JVM | `Thread.currentThread().threadId()` | +| .NET | `Thread.CurrentThread.ManagedThreadId` | +| Python | `threading.current_thread().ident` | +| Ruby | `Thread.current.object_id` | +| C++ | `std::this_thread::get_id()` | +| Erlang | `erlang:self()` |. """ THREAD_NAME: Final = "thread.name" """ Current thread name. +Note: Examples of where the value can be extracted from: + +| Language or platform | Source | +| --- | --- | +| JVM | `Thread.currentThread().getName()` | +| .NET | `Thread.CurrentThread.Name` | +| Python | `threading.current_thread().name` | +| Ruby | `Thread.current.name` | +| Erlang | `erlang:process_info(self(), registered_name)` |. """ diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py index 52edebe2869..8b7426f1cbb 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/vcs_attributes.py @@ -151,7 +151,7 @@ VCS_REPOSITORY_URL_FULL: Final = "vcs.repository.url.full" """ -The [canonical URL](https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.) of the repository providing the complete HTTP(S) address in order to locate and identify the repository through a browser. +The [canonical URL](https://support.google.com/webmasters/answer/10347851) of the repository providing the complete HTTP(S) address in order to locate and identify the repository through a browser. Note: In Git Version Control Systems, the canonical URL SHOULD NOT include the `.git` extension. 
""" diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py index f300e087133..496d0c5f666 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/container_metrics.py @@ -160,6 +160,64 @@ def create_container_filesystem_usage(meter: Meter) -> UpDownCounter: ) +CONTAINER_MEMORY_AVAILABLE: Final = "container.memory.available" +""" +Container memory available +Instrument: updowncounter +Unit: By +Note: Available memory for use. This is defined as the memory limit - workingSetBytes. If memory limit is undefined, the available bytes is omitted. +In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and by subtracting the `container_memory_working_set_bytes` metric from the `container_spec_memory_limit_bytes` metric. +In K8s, this metric is derived from the [MemoryStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. 
+""" + + +def create_container_memory_available(meter: Meter) -> UpDownCounter: + """Container memory available""" + return meter.create_up_down_counter( + name=CONTAINER_MEMORY_AVAILABLE, + description="Container memory available.", + unit="By", + ) + + +CONTAINER_MEMORY_PAGING_FAULTS: Final = "container.memory.paging.faults" +""" +Container memory paging faults +Instrument: counter +Unit: {fault} +Note: In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and specifically the `container_memory_failures_total{failure_type=pgfault, scope=container}` and `container_memory_failures_total{failure_type=pgmajfault, scope=container}`metric. +In K8s, this metric is derived from the [MemoryStats.PageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) and [MemoryStats.MajorPageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. +""" + + +def create_container_memory_paging_faults(meter: Meter) -> Counter: + """Container memory paging faults""" + return meter.create_counter( + name=CONTAINER_MEMORY_PAGING_FAULTS, + description="Container memory paging faults.", + unit="{fault}", + ) + + +CONTAINER_MEMORY_RSS: Final = "container.memory.rss" +""" +Container memory RSS +Instrument: updowncounter +Unit: By +Note: In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and specifically the `container_memory_rss` metric. 
+In K8s, this metric is derived from the [MemoryStats.RSSBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. +""" + + +def create_container_memory_rss(meter: Meter) -> UpDownCounter: + """Container memory RSS""" + return meter.create_up_down_counter( + name=CONTAINER_MEMORY_RSS, + description="Container memory RSS.", + unit="By", + ) + + CONTAINER_MEMORY_USAGE: Final = "container.memory.usage" """ Memory usage of the container @@ -178,6 +236,25 @@ def create_container_memory_usage(meter: Meter) -> Counter: ) +CONTAINER_MEMORY_WORKING_SET: Final = "container.memory.working_set" +""" +Container memory working set +Instrument: updowncounter +Unit: By +Note: In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and specifically the `container_memory_working_set_bytes` metric. +In K8s, this metric is derived from the [MemoryStats.WorkingSetBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. 
+""" + + +def create_container_memory_working_set(meter: Meter) -> UpDownCounter: + """Container memory working set""" + return meter.create_up_down_counter( + name=CONTAINER_MEMORY_WORKING_SET, + description="Container memory working set.", + unit="By", + ) + + CONTAINER_NETWORK_IO: Final = "container.network.io" """ Network bytes for the container diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py index aa14e94dd98..0127376e2af 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/k8s_metrics.py @@ -56,6 +56,29 @@ def create_k8s_container_cpu_limit(meter: Meter) -> UpDownCounter: ) +K8S_CONTAINER_CPU_LIMIT_UTILIZATION: Final = ( + "k8s.container.cpu.limit_utilization" +) +""" +The ratio of container CPU usage to its CPU limit +Instrument: gauge +Unit: 1 +Note: The value range is [0.0,1.0]. A value of 1.0 means the container is using 100% of its CPU limit. If the CPU limit is not set, this metric SHOULD NOT be emitted for that container. 
+""" + + +def create_k8s_container_cpu_limit_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The ratio of container CPU usage to its CPU limit""" + return meter.create_observable_gauge( + name=K8S_CONTAINER_CPU_LIMIT_UTILIZATION, + callbacks=callbacks, + description="The ratio of container CPU usage to its CPU limit.", + unit="1", + ) + + K8S_CONTAINER_CPU_REQUEST: Final = "k8s.container.cpu.request" """ CPU resource requested for the container @@ -74,6 +97,28 @@ def create_k8s_container_cpu_request(meter: Meter) -> UpDownCounter: ) +K8S_CONTAINER_CPU_REQUEST_UTILIZATION: Final = ( + "k8s.container.cpu.request_utilization" +) +""" +The ratio of container CPU usage to its CPU request +Instrument: gauge +Unit: 1 +""" + + +def create_k8s_container_cpu_request_utilization( + meter: Meter, callbacks: Optional[Sequence[CallbackT]] +) -> ObservableGauge: + """The ratio of container CPU usage to its CPU request""" + return meter.create_observable_gauge( + name=K8S_CONTAINER_CPU_REQUEST_UTILIZATION, + callbacks=callbacks, + description="The ratio of container CPU usage to its CPU request.", + unit="1", + ) + + K8S_CONTAINER_EPHEMERAL_STORAGE_LIMIT: Final = ( "k8s.container.ephemeral_storage.limit" ) @@ -271,6 +316,21 @@ def create_k8s_container_storage_request(meter: Meter) -> UpDownCounter: K8S_CRONJOB_ACTIVE_JOBS: Final = "k8s.cronjob.active_jobs" """ +Deprecated: Replaced by `k8s.cronjob.job.active`. 
+""" + + +def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.cronjob.job.active` instead""" + return meter.create_up_down_counter( + name=K8S_CRONJOB_ACTIVE_JOBS, + description="Deprecated, use `k8s.cronjob.job.active` instead.", + unit="{job}", + ) + + +K8S_CRONJOB_JOB_ACTIVE: Final = "k8s.cronjob.job.active" +""" The number of actively running jobs for a cronjob Instrument: updowncounter Unit: {job} @@ -279,10 +339,10 @@ def create_k8s_container_storage_request(meter: Meter) -> UpDownCounter: """ -def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: +def create_k8s_cronjob_job_active(meter: Meter) -> UpDownCounter: """The number of actively running jobs for a cronjob""" return meter.create_up_down_counter( - name=K8S_CRONJOB_ACTIVE_JOBS, + name=K8S_CRONJOB_JOB_ACTIVE, description="The number of actively running jobs for a cronjob.", unit="{job}", ) @@ -292,6 +352,59 @@ def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: "k8s.daemonset.current_scheduled_nodes" ) """ +Deprecated: Replaced by `k8s.daemonset.node.current_scheduled`. +""" + + +def create_k8s_daemonset_current_scheduled_nodes( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `k8s.daemonset.node.current_scheduled` instead""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_CURRENT_SCHEDULED_NODES, + description="Deprecated, use `k8s.daemonset.node.current_scheduled` instead.", + unit="{node}", + ) + + +K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: Final = ( + "k8s.daemonset.desired_scheduled_nodes" +) +""" +Deprecated: Replaced by `k8s.daemonset.node.desired_scheduled`. 
+""" + + +def create_k8s_daemonset_desired_scheduled_nodes( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `k8s.daemonset.node.desired_scheduled` instead""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_DESIRED_SCHEDULED_NODES, + description="Deprecated, use `k8s.daemonset.node.desired_scheduled` instead.", + unit="{node}", + ) + + +K8S_DAEMONSET_MISSCHEDULED_NODES: Final = "k8s.daemonset.misscheduled_nodes" +""" +Deprecated: Replaced by `k8s.daemonset.node.misscheduled`. +""" + + +def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.daemonset.node.misscheduled` instead""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_MISSCHEDULED_NODES, + description="Deprecated, use `k8s.daemonset.node.misscheduled` instead.", + unit="{node}", + ) + + +K8S_DAEMONSET_NODE_CURRENT_SCHEDULED: Final = ( + "k8s.daemonset.node.current_scheduled" +) +""" Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod Instrument: updowncounter Unit: {node} @@ -300,19 +413,17 @@ def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: """ -def create_k8s_daemonset_current_scheduled_nodes( - meter: Meter, -) -> UpDownCounter: +def create_k8s_daemonset_node_current_scheduled(meter: Meter) -> UpDownCounter: """Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod""" return meter.create_up_down_counter( - name=K8S_DAEMONSET_CURRENT_SCHEDULED_NODES, + name=K8S_DAEMONSET_NODE_CURRENT_SCHEDULED, description="Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod.", unit="{node}", ) -K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: Final = ( - "k8s.daemonset.desired_scheduled_nodes" +K8S_DAEMONSET_NODE_DESIRED_SCHEDULED: Final = ( + "k8s.daemonset.node.desired_scheduled" ) """ Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) @@ -323,18 +434,16 @@ 
def create_k8s_daemonset_current_scheduled_nodes( """ -def create_k8s_daemonset_desired_scheduled_nodes( - meter: Meter, -) -> UpDownCounter: +def create_k8s_daemonset_node_desired_scheduled(meter: Meter) -> UpDownCounter: """Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)""" return meter.create_up_down_counter( - name=K8S_DAEMONSET_DESIRED_SCHEDULED_NODES, + name=K8S_DAEMONSET_NODE_DESIRED_SCHEDULED, description="Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod).", unit="{node}", ) -K8S_DAEMONSET_MISSCHEDULED_NODES: Final = "k8s.daemonset.misscheduled_nodes" +K8S_DAEMONSET_NODE_MISSCHEDULED: Final = "k8s.daemonset.node.misscheduled" """ Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod Instrument: updowncounter @@ -344,16 +453,16 @@ def create_k8s_daemonset_desired_scheduled_nodes( """ -def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter: +def create_k8s_daemonset_node_misscheduled(meter: Meter) -> UpDownCounter: """Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod""" return meter.create_up_down_counter( - name=K8S_DAEMONSET_MISSCHEDULED_NODES, + name=K8S_DAEMONSET_NODE_MISSCHEDULED, description="Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod.", unit="{node}", ) -K8S_DAEMONSET_READY_NODES: Final = "k8s.daemonset.ready_nodes" +K8S_DAEMONSET_NODE_READY: Final = "k8s.daemonset.node.ready" """ Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready Instrument: updowncounter @@ -363,17 +472,62 @@ def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter: """ -def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter: +def create_k8s_daemonset_node_ready(meter: Meter) -> UpDownCounter: """Number of nodes that should be running the 
daemon pod and have one or more of the daemon pod running and ready""" return meter.create_up_down_counter( - name=K8S_DAEMONSET_READY_NODES, + name=K8S_DAEMONSET_NODE_READY, description="Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", unit="{node}", ) +K8S_DAEMONSET_READY_NODES: Final = "k8s.daemonset.ready_nodes" +""" +Deprecated: Replaced by `k8s.daemonset.node.ready`. +""" + + +def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.daemonset.node.ready` instead""" + return meter.create_up_down_counter( + name=K8S_DAEMONSET_READY_NODES, + description="Deprecated, use `k8s.daemonset.node.ready` instead.", + unit="{node}", + ) + + K8S_DEPLOYMENT_AVAILABLE_PODS: Final = "k8s.deployment.available_pods" """ +Deprecated: Replaced by `k8s.deployment.pod.available`. +""" + + +def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.deployment.pod.available` instead""" + return meter.create_up_down_counter( + name=K8S_DEPLOYMENT_AVAILABLE_PODS, + description="Deprecated, use `k8s.deployment.pod.available` instead.", + unit="{pod}", + ) + + +K8S_DEPLOYMENT_DESIRED_PODS: Final = "k8s.deployment.desired_pods" +""" +Deprecated: Replaced by `k8s.deployment.pod.desired`. 
+""" + + +def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.deployment.pod.desired` instead""" + return meter.create_up_down_counter( + name=K8S_DEPLOYMENT_DESIRED_PODS, + description="Deprecated, use `k8s.deployment.pod.desired` instead.", + unit="{pod}", + ) + + +K8S_DEPLOYMENT_POD_AVAILABLE: Final = "k8s.deployment.pod.available" +""" Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment Instrument: updowncounter Unit: {pod} @@ -382,16 +536,16 @@ def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter: """ -def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter: +def create_k8s_deployment_pod_available(meter: Meter) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment""" return meter.create_up_down_counter( - name=K8S_DEPLOYMENT_AVAILABLE_PODS, + name=K8S_DEPLOYMENT_POD_AVAILABLE, description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment.", unit="{pod}", ) -K8S_DEPLOYMENT_DESIRED_PODS: Final = "k8s.deployment.desired_pods" +K8S_DEPLOYMENT_POD_DESIRED: Final = "k8s.deployment.pod.desired" """ Number of desired replica pods in this deployment Instrument: updowncounter @@ -401,10 +555,10 @@ def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter: +def create_k8s_deployment_pod_desired(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this deployment""" return meter.create_up_down_counter( - name=K8S_DEPLOYMENT_DESIRED_PODS, + name=K8S_DEPLOYMENT_POD_DESIRED, description="Number of desired replica pods in this deployment.", unit="{pod}", ) @@ -412,57 +566,45 @@ def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter: K8S_HPA_CURRENT_PODS: Final = "k8s.hpa.current_pods" """ -Current number of 
replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler -Instrument: updowncounter -Unit: {pod} -Note: This metric aligns with the `currentReplicas` field of the -[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling). +Deprecated: Replaced by `k8s.hpa.pod.current`. """ def create_k8s_hpa_current_pods(meter: Meter) -> UpDownCounter: - """Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler""" + """Deprecated, use `k8s.hpa.pod.current` instead""" return meter.create_up_down_counter( name=K8S_HPA_CURRENT_PODS, - description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler.", + description="Deprecated, use `k8s.hpa.pod.current` instead.", unit="{pod}", ) K8S_HPA_DESIRED_PODS: Final = "k8s.hpa.desired_pods" """ -Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler -Instrument: updowncounter -Unit: {pod} -Note: This metric aligns with the `desiredReplicas` field of the -[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling). +Deprecated: Replaced by `k8s.hpa.pod.desired`. 
""" def create_k8s_hpa_desired_pods(meter: Meter) -> UpDownCounter: - """Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler""" + """Deprecated, use `k8s.hpa.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_HPA_DESIRED_PODS, - description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler.", + description="Deprecated, use `k8s.hpa.pod.desired` instead.", unit="{pod}", ) K8S_HPA_MAX_PODS: Final = "k8s.hpa.max_pods" """ -The upper limit for the number of replica pods to which the autoscaler can scale up -Instrument: updowncounter -Unit: {pod} -Note: This metric aligns with the `maxReplicas` field of the -[K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling). +Deprecated: Replaced by `k8s.hpa.pod.max`. """ def create_k8s_hpa_max_pods(meter: Meter) -> UpDownCounter: - """The upper limit for the number of replica pods to which the autoscaler can scale up""" + """Deprecated, use `k8s.hpa.pod.max` instead""" return meter.create_up_down_counter( name=K8S_HPA_MAX_PODS, - description="The upper limit for the number of replica pods to which the autoscaler can scale up.", + description="Deprecated, use `k8s.hpa.pod.max` instead.", unit="{pod}", ) @@ -545,6 +687,78 @@ def create_k8s_hpa_metric_target_cpu_value( K8S_HPA_MIN_PODS: Final = "k8s.hpa.min_pods" """ +Deprecated: Replaced by `k8s.hpa.pod.min`. 
+""" + + +def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.hpa.pod.min` instead""" + return meter.create_up_down_counter( + name=K8S_HPA_MIN_PODS, + description="Deprecated, use `k8s.hpa.pod.min` instead.", + unit="{pod}", + ) + + +K8S_HPA_POD_CURRENT: Final = "k8s.hpa.pod.current" +""" +Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `currentReplicas` field of the +[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling). +""" + + +def create_k8s_hpa_pod_current(meter: Meter) -> UpDownCounter: + """Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler""" + return meter.create_up_down_counter( + name=K8S_HPA_POD_CURRENT, + description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler.", + unit="{pod}", + ) + + +K8S_HPA_POD_DESIRED: Final = "k8s.hpa.pod.desired" +""" +Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `desiredReplicas` field of the +[K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling). 
+""" + + +def create_k8s_hpa_pod_desired(meter: Meter) -> UpDownCounter: + """Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler""" + return meter.create_up_down_counter( + name=K8S_HPA_POD_DESIRED, + description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler.", + unit="{pod}", + ) + + +K8S_HPA_POD_MAX: Final = "k8s.hpa.pod.max" +""" +The upper limit for the number of replica pods to which the autoscaler can scale up +Instrument: updowncounter +Unit: {pod} +Note: This metric aligns with the `maxReplicas` field of the +[K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling). +""" + + +def create_k8s_hpa_pod_max(meter: Meter) -> UpDownCounter: + """The upper limit for the number of replica pods to which the autoscaler can scale up""" + return meter.create_up_down_counter( + name=K8S_HPA_POD_MAX, + description="The upper limit for the number of replica pods to which the autoscaler can scale up.", + unit="{pod}", + ) + + +K8S_HPA_POD_MIN: Final = "k8s.hpa.pod.min" +""" The lower limit for the number of replica pods to which the autoscaler can scale down Instrument: updowncounter Unit: {pod} @@ -553,10 +767,10 @@ def create_k8s_hpa_metric_target_cpu_value( """ -def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: +def create_k8s_hpa_pod_min(meter: Meter) -> UpDownCounter: """The lower limit for the number of replica pods to which the autoscaler can scale down""" return meter.create_up_down_counter( - name=K8S_HPA_MIN_PODS, + name=K8S_HPA_POD_MIN, description="The lower limit for the number of replica pods to which the autoscaler can scale down.", unit="{pod}", ) @@ -564,6 +778,66 @@ def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: K8S_JOB_ACTIVE_PODS: Final = "k8s.job.active_pods" """ +Deprecated: Replaced by `k8s.job.pod.active`. 
+""" + + +def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.job.pod.active` instead""" + return meter.create_up_down_counter( + name=K8S_JOB_ACTIVE_PODS, + description="Deprecated, use `k8s.job.pod.active` instead.", + unit="{pod}", + ) + + +K8S_JOB_DESIRED_SUCCESSFUL_PODS: Final = "k8s.job.desired_successful_pods" +""" +Deprecated: Replaced by `k8s.job.pod.desired_successful`. +""" + + +def create_k8s_job_desired_successful_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.job.pod.desired_successful` instead""" + return meter.create_up_down_counter( + name=K8S_JOB_DESIRED_SUCCESSFUL_PODS, + description="Deprecated, use `k8s.job.pod.desired_successful` instead.", + unit="{pod}", + ) + + +K8S_JOB_FAILED_PODS: Final = "k8s.job.failed_pods" +""" +Deprecated: Replaced by `k8s.job.pod.failed`. +""" + + +def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.job.pod.failed` instead""" + return meter.create_up_down_counter( + name=K8S_JOB_FAILED_PODS, + description="Deprecated, use `k8s.job.pod.failed` instead.", + unit="{pod}", + ) + + +K8S_JOB_MAX_PARALLEL_PODS: Final = "k8s.job.max_parallel_pods" +""" +Deprecated: Replaced by `k8s.job.pod.max_parallel`. 
+""" + + +def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.job.pod.max_parallel` instead""" + return meter.create_up_down_counter( + name=K8S_JOB_MAX_PARALLEL_PODS, + description="Deprecated, use `k8s.job.pod.max_parallel` instead.", + unit="{pod}", + ) + + +K8S_JOB_POD_ACTIVE: Final = "k8s.job.pod.active" +""" The number of pending and actively running pods for a job Instrument: updowncounter Unit: {pod} @@ -572,16 +846,16 @@ def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter: +def create_k8s_job_pod_active(meter: Meter) -> UpDownCounter: """The number of pending and actively running pods for a job""" return meter.create_up_down_counter( - name=K8S_JOB_ACTIVE_PODS, + name=K8S_JOB_POD_ACTIVE, description="The number of pending and actively running pods for a job.", unit="{pod}", ) -K8S_JOB_DESIRED_SUCCESSFUL_PODS: Final = "k8s.job.desired_successful_pods" +K8S_JOB_POD_DESIRED_SUCCESSFUL: Final = "k8s.job.pod.desired_successful" """ The desired number of successfully finished pods the job should be run with Instrument: updowncounter @@ -591,16 +865,16 @@ def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_job_desired_successful_pods(meter: Meter) -> UpDownCounter: +def create_k8s_job_pod_desired_successful(meter: Meter) -> UpDownCounter: """The desired number of successfully finished pods the job should be run with""" return meter.create_up_down_counter( - name=K8S_JOB_DESIRED_SUCCESSFUL_PODS, + name=K8S_JOB_POD_DESIRED_SUCCESSFUL, description="The desired number of successfully finished pods the job should be run with.", unit="{pod}", ) -K8S_JOB_FAILED_PODS: Final = "k8s.job.failed_pods" +K8S_JOB_POD_FAILED: Final = "k8s.job.pod.failed" """ The number of pods which reached phase Failed for a job Instrument: updowncounter @@ -610,16 +884,16 @@ def create_k8s_job_desired_successful_pods(meter: Meter) -> 
UpDownCounter: """ -def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter: +def create_k8s_job_pod_failed(meter: Meter) -> UpDownCounter: """The number of pods which reached phase Failed for a job""" return meter.create_up_down_counter( - name=K8S_JOB_FAILED_PODS, + name=K8S_JOB_POD_FAILED, description="The number of pods which reached phase Failed for a job.", unit="{pod}", ) -K8S_JOB_MAX_PARALLEL_PODS: Final = "k8s.job.max_parallel_pods" +K8S_JOB_POD_MAX_PARALLEL: Final = "k8s.job.pod.max_parallel" """ The max desired number of pods the job should run at any given time Instrument: updowncounter @@ -629,16 +903,16 @@ def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter: +def create_k8s_job_pod_max_parallel(meter: Meter) -> UpDownCounter: """The max desired number of pods the job should run at any given time""" return meter.create_up_down_counter( - name=K8S_JOB_MAX_PARALLEL_PODS, + name=K8S_JOB_POD_MAX_PARALLEL, description="The max desired number of pods the job should run at any given time.", unit="{pod}", ) -K8S_JOB_SUCCESSFUL_PODS: Final = "k8s.job.successful_pods" +K8S_JOB_POD_SUCCESSFUL: Final = "k8s.job.pod.successful" """ The number of pods which reached phase Succeeded for a job Instrument: updowncounter @@ -648,15 +922,30 @@ def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_job_successful_pods(meter: Meter) -> UpDownCounter: +def create_k8s_job_pod_successful(meter: Meter) -> UpDownCounter: """The number of pods which reached phase Succeeded for a job""" return meter.create_up_down_counter( - name=K8S_JOB_SUCCESSFUL_PODS, + name=K8S_JOB_POD_SUCCESSFUL, description="The number of pods which reached phase Succeeded for a job.", unit="{pod}", ) +K8S_JOB_SUCCESSFUL_PODS: Final = "k8s.job.successful_pods" +""" +Deprecated: Replaced by `k8s.job.pod.successful`. 
+""" + + +def create_k8s_job_successful_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.job.pod.successful` instead""" + return meter.create_up_down_counter( + name=K8S_JOB_SUCCESSFUL_PODS, + description="Deprecated, use `k8s.job.pod.successful` instead.", + unit="{pod}", + ) + + K8S_NAMESPACE_PHASE: Final = "k8s.namespace.phase" """ Describes number of K8s namespaces that are currently in a given phase @@ -676,17 +965,15 @@ def create_k8s_namespace_phase(meter: Meter) -> UpDownCounter: K8S_NODE_ALLOCATABLE_CPU: Final = "k8s.node.allocatable.cpu" """ -Amount of cpu allocatable on the node -Instrument: updowncounter -Unit: {cpu} +Deprecated: Replaced by `k8s.node.cpu.allocatable`. """ def create_k8s_node_allocatable_cpu(meter: Meter) -> UpDownCounter: - """Amount of cpu allocatable on the node""" + """Deprecated, use `k8s.node.cpu.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_CPU, - description="Amount of cpu allocatable on the node.", + description="Deprecated, use `k8s.node.cpu.allocatable` instead.", unit="{cpu}", ) @@ -695,53 +982,47 @@ def create_k8s_node_allocatable_cpu(meter: Meter) -> UpDownCounter: "k8s.node.allocatable.ephemeral_storage" ) """ -Amount of ephemeral-storage allocatable on the node -Instrument: updowncounter -Unit: By +Deprecated: Replaced by `k8s.node.ephemeral_storage.allocatable`. 
""" def create_k8s_node_allocatable_ephemeral_storage( meter: Meter, ) -> UpDownCounter: - """Amount of ephemeral-storage allocatable on the node""" + """Deprecated, use `k8s.node.ephemeral_storage.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_EPHEMERAL_STORAGE, - description="Amount of ephemeral-storage allocatable on the node.", + description="Deprecated, use `k8s.node.ephemeral_storage.allocatable` instead.", unit="By", ) K8S_NODE_ALLOCATABLE_MEMORY: Final = "k8s.node.allocatable.memory" """ -Amount of memory allocatable on the node -Instrument: updowncounter -Unit: By +Deprecated: Replaced by `k8s.node.memory.allocatable`. """ def create_k8s_node_allocatable_memory(meter: Meter) -> UpDownCounter: - """Amount of memory allocatable on the node""" + """Deprecated, use `k8s.node.memory.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_MEMORY, - description="Amount of memory allocatable on the node.", + description="Deprecated, use `k8s.node.memory.allocatable` instead.", unit="By", ) K8S_NODE_ALLOCATABLE_PODS: Final = "k8s.node.allocatable.pods" """ -Amount of pods allocatable on the node -Instrument: updowncounter -Unit: {pod} +Deprecated: Replaced by `k8s.node.pod.allocatable`. 
""" def create_k8s_node_allocatable_pods(meter: Meter) -> UpDownCounter: - """Amount of pods allocatable on the node""" + """Deprecated, use `k8s.node.pod.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_PODS, - description="Amount of pods allocatable on the node.", + description="Deprecated, use `k8s.node.pod.allocatable` instead.", unit="{pod}", ) @@ -764,6 +1045,23 @@ def create_k8s_node_condition_status(meter: Meter) -> UpDownCounter: ) +K8S_NODE_CPU_ALLOCATABLE: Final = "k8s.node.cpu.allocatable" +""" +Amount of cpu allocatable on the node +Instrument: updowncounter +Unit: {cpu} +""" + + +def create_k8s_node_cpu_allocatable(meter: Meter) -> UpDownCounter: + """Amount of cpu allocatable on the node""" + return meter.create_up_down_counter( + name=K8S_NODE_CPU_ALLOCATABLE, + description="Amount of cpu allocatable on the node.", + unit="{cpu}", + ) + + K8S_NODE_CPU_TIME: Final = "k8s.node.cpu.time" """ Total CPU time consumed @@ -803,6 +1101,27 @@ def create_k8s_node_cpu_usage( ) +K8S_NODE_EPHEMERAL_STORAGE_ALLOCATABLE: Final = ( + "k8s.node.ephemeral_storage.allocatable" +) +""" +Amount of ephemeral-storage allocatable on the node +Instrument: updowncounter +Unit: By +""" + + +def create_k8s_node_ephemeral_storage_allocatable( + meter: Meter, +) -> UpDownCounter: + """Amount of ephemeral-storage allocatable on the node""" + return meter.create_up_down_counter( + name=K8S_NODE_EPHEMERAL_STORAGE_ALLOCATABLE, + description="Amount of ephemeral-storage allocatable on the node.", + unit="By", + ) + + K8S_NODE_FILESYSTEM_AVAILABLE: Final = "k8s.node.filesystem.available" """ Node filesystem available bytes @@ -868,6 +1187,80 @@ def create_k8s_node_filesystem_usage(meter: Meter) -> UpDownCounter: ) +K8S_NODE_MEMORY_ALLOCATABLE: Final = "k8s.node.memory.allocatable" +""" +Amount of memory allocatable on the node +Instrument: updowncounter +Unit: By +""" + + +def create_k8s_node_memory_allocatable(meter: Meter) -> UpDownCounter: + 
"""Amount of memory allocatable on the node""" + return meter.create_up_down_counter( + name=K8S_NODE_MEMORY_ALLOCATABLE, + description="Amount of memory allocatable on the node.", + unit="By", + ) + + +K8S_NODE_MEMORY_AVAILABLE: Final = "k8s.node.memory.available" +""" +Node memory available +Instrument: updowncounter +Unit: By +Note: Available memory for use. This is defined as the memory limit - workingSetBytes. If memory limit is undefined, the available bytes is omitted. +This metric is derived from the [MemoryStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. +""" + + +def create_k8s_node_memory_available(meter: Meter) -> UpDownCounter: + """Node memory available""" + return meter.create_up_down_counter( + name=K8S_NODE_MEMORY_AVAILABLE, + description="Node memory available.", + unit="By", + ) + + +K8S_NODE_MEMORY_PAGING_FAULTS: Final = "k8s.node.memory.paging.faults" +""" +Node memory paging faults +Instrument: counter +Unit: {fault} +Note: Cumulative number of major/minor page faults. +This metric is derived from the [MemoryStats.PageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) and [MemoryStats.MajorPageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) fields of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. 
+""" + + +def create_k8s_node_memory_paging_faults(meter: Meter) -> Counter: + """Node memory paging faults""" + return meter.create_counter( + name=K8S_NODE_MEMORY_PAGING_FAULTS, + description="Node memory paging faults.", + unit="{fault}", + ) + + +K8S_NODE_MEMORY_RSS: Final = "k8s.node.memory.rss" +""" +Node memory RSS +Instrument: updowncounter +Unit: By +Note: The amount of anonymous and swap cache memory (includes transparent hugepages). +This metric is derived from the [MemoryStats.RSSBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. +""" + + +def create_k8s_node_memory_rss(meter: Meter) -> UpDownCounter: + """Node memory RSS""" + return meter.create_up_down_counter( + name=K8S_NODE_MEMORY_RSS, + description="Node memory RSS.", + unit="By", + ) + + K8S_NODE_MEMORY_USAGE: Final = "k8s.node.memory.usage" """ Memory usage of the Node @@ -889,6 +1282,25 @@ def create_k8s_node_memory_usage( ) +K8S_NODE_MEMORY_WORKING_SET: Final = "k8s.node.memory.working_set" +""" +Node memory working set +Instrument: updowncounter +Unit: By +Note: The amount of working set memory. This includes recently accessed memory, dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes. +This metric is derived from the [MemoryStats.WorkingSetBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. 
+""" + + +def create_k8s_node_memory_working_set(meter: Meter) -> UpDownCounter: + """Node memory working set""" + return meter.create_up_down_counter( + name=K8S_NODE_MEMORY_WORKING_SET, + description="Node memory working set.", + unit="By", + ) + + K8S_NODE_NETWORK_ERRORS: Final = "k8s.node.network.errors" """ Node network errors @@ -923,6 +1335,23 @@ def create_k8s_node_network_io(meter: Meter) -> Counter: ) +K8S_NODE_POD_ALLOCATABLE: Final = "k8s.node.pod.allocatable" +""" +Amount of pods allocatable on the node +Instrument: updowncounter +Unit: {pod} +""" + + +def create_k8s_node_pod_allocatable(meter: Meter) -> UpDownCounter: + """Amount of pods allocatable on the node""" + return meter.create_up_down_counter( + name=K8S_NODE_POD_ALLOCATABLE, + description="Amount of pods allocatable on the node.", + unit="{pod}", + ) + + K8S_NODE_UPTIME: Final = "k8s.node.uptime" """ The time the Node has been running @@ -1049,6 +1478,63 @@ def create_k8s_pod_filesystem_usage(meter: Meter) -> UpDownCounter: ) +K8S_POD_MEMORY_AVAILABLE: Final = "k8s.pod.memory.available" +""" +Pod memory available +Instrument: updowncounter +Unit: By +Note: Available memory for use. This is defined as the memory limit - workingSetBytes. If memory limit is undefined, the available bytes is omitted. +This metric is derived from the [MemoryStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. +""" + + +def create_k8s_pod_memory_available(meter: Meter) -> UpDownCounter: + """Pod memory available""" + return meter.create_up_down_counter( + name=K8S_POD_MEMORY_AVAILABLE, + description="Pod memory available.", + unit="By", + ) + + +K8S_POD_MEMORY_PAGING_FAULTS: Final = "k8s.pod.memory.paging.faults" +""" +Pod memory paging faults +Instrument: counter +Unit: {fault} +Note: Cumulative number of major/minor page faults. 
+This metric is derived from the [MemoryStats.PageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) and [MemoryStats.MajorPageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) fields of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. +""" + + +def create_k8s_pod_memory_paging_faults(meter: Meter) -> Counter: + """Pod memory paging faults""" + return meter.create_counter( + name=K8S_POD_MEMORY_PAGING_FAULTS, + description="Pod memory paging faults.", + unit="{fault}", + ) + + +K8S_POD_MEMORY_RSS: Final = "k8s.pod.memory.rss" +""" +Pod memory RSS +Instrument: updowncounter +Unit: By +Note: The amount of anonymous and swap cache memory (includes transparent hugepages). +This metric is derived from the [MemoryStats.RSSBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. +""" + + +def create_k8s_pod_memory_rss(meter: Meter) -> UpDownCounter: + """Pod memory RSS""" + return meter.create_up_down_counter( + name=K8S_POD_MEMORY_RSS, + description="Pod memory RSS.", + unit="By", + ) + + K8S_POD_MEMORY_USAGE: Final = "k8s.pod.memory.usage" """ Memory usage of the Pod @@ -1070,6 +1556,25 @@ def create_k8s_pod_memory_usage( ) + +K8S_POD_MEMORY_WORKING_SET: Final = "k8s.pod.memory.working_set" +""" +Pod memory working set +Instrument: updowncounter +Unit: By +Note: The amount of working set memory. This includes recently accessed memory, dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes. 
+This metric is derived from the [MemoryStats.WorkingSetBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. +""" + + +def create_k8s_pod_memory_working_set(meter: Meter) -> UpDownCounter: + """Pod memory working set""" + return meter.create_up_down_counter( + name=K8S_POD_MEMORY_WORKING_SET, + description="Pod memory working set.", + unit="By", + ) + + K8S_POD_NETWORK_ERRORS: Final = "k8s.pod.network.errors" """ Pod network errors @@ -1104,6 +1609,44 @@ def create_k8s_pod_network_io(meter: Meter) -> Counter: ) +K8S_POD_STATUS_PHASE: Final = "k8s.pod.status.phase" +""" +Describes number of K8s Pods that are currently in a given phase +Instrument: updowncounter +Unit: {pod} +Note: All possible pod phases will be reported at each time interval to avoid missing metrics. +Only the value corresponding to the current phase will be non-zero. +""" + + +def create_k8s_pod_status_phase(meter: Meter) -> UpDownCounter: + """Describes number of K8s Pods that are currently in a given phase""" + return meter.create_up_down_counter( + name=K8S_POD_STATUS_PHASE, + description="Describes number of K8s Pods that are currently in a given phase.", + unit="{pod}", + ) + + +K8S_POD_STATUS_REASON: Final = "k8s.pod.status.reason" +""" +Describes the number of K8s Pods that are currently in a state for a given reason +Instrument: updowncounter +Unit: {pod} +Note: All possible pod status reasons will be reported at each time interval to avoid missing metrics. +Only the value corresponding to the current reason will be non-zero. 
+""" + + +def create_k8s_pod_status_reason(meter: Meter) -> UpDownCounter: + """Describes the number of K8s Pods that are currently in a state for a given reason""" + return meter.create_up_down_counter( + name=K8S_POD_STATUS_REASON, + description="Describes the number of K8s Pods that are currently in a state for a given reason.", + unit="{pod}", + ) + + K8S_POD_UPTIME: Final = "k8s.pod.uptime" """ The time the Pod has been running @@ -1258,6 +1801,36 @@ def create_k8s_pod_volume_usage(meter: Meter) -> UpDownCounter: K8S_REPLICASET_AVAILABLE_PODS: Final = "k8s.replicaset.available_pods" """ +Deprecated: Replaced by `k8s.replicaset.pod.available`. +""" + + +def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.replicaset.pod.available` instead""" + return meter.create_up_down_counter( + name=K8S_REPLICASET_AVAILABLE_PODS, + description="Deprecated, use `k8s.replicaset.pod.available` instead.", + unit="{pod}", + ) + + +K8S_REPLICASET_DESIRED_PODS: Final = "k8s.replicaset.desired_pods" +""" +Deprecated: Replaced by `k8s.replicaset.pod.desired`. 
+""" + + +def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.replicaset.pod.desired` instead""" + return meter.create_up_down_counter( + name=K8S_REPLICASET_DESIRED_PODS, + description="Deprecated, use `k8s.replicaset.pod.desired` instead.", + unit="{pod}", + ) + + +K8S_REPLICASET_POD_AVAILABLE: Final = "k8s.replicaset.pod.available" +""" Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset Instrument: updowncounter Unit: {pod} @@ -1266,16 +1839,16 @@ def create_k8s_pod_volume_usage(meter: Meter) -> UpDownCounter: """ -def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter: +def create_k8s_replicaset_pod_available(meter: Meter) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset""" return meter.create_up_down_counter( - name=K8S_REPLICASET_AVAILABLE_PODS, + name=K8S_REPLICASET_POD_AVAILABLE, description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset.", unit="{pod}", ) -K8S_REPLICASET_DESIRED_PODS: Final = "k8s.replicaset.desired_pods" +K8S_REPLICASET_POD_DESIRED: Final = "k8s.replicaset.pod.desired" """ Number of desired replica pods in this replicaset Instrument: updowncounter @@ -1285,10 +1858,10 @@ def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter: +def create_k8s_replicaset_pod_desired(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this replicaset""" return meter.create_up_down_counter( - name=K8S_REPLICASET_DESIRED_PODS, + name=K8S_REPLICASET_POD_DESIRED, description="Number of desired replica pods in this replicaset.", unit="{pod}", ) @@ -1298,17 +1871,17 @@ def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter: "k8s.replication_controller.available_pods" ) """ -Deprecated: Replaced by 
`k8s.replicationcontroller.available_pods`. +Deprecated: Replaced by `k8s.replicationcontroller.pod.available`. """ def create_k8s_replication_controller_available_pods( meter: Meter, ) -> UpDownCounter: - """Deprecated, use `k8s.replicationcontroller.available_pods` instead""" + """Deprecated, use `k8s.replicationcontroller.pod.available` instead""" return meter.create_up_down_counter( name=K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS, - description="Deprecated, use `k8s.replicationcontroller.available_pods` instead.", + description="Deprecated, use `k8s.replicationcontroller.pod.available` instead.", unit="{pod}", ) @@ -1317,17 +1890,17 @@ def create_k8s_replication_controller_available_pods( "k8s.replication_controller.desired_pods" ) """ -Deprecated: Replaced by `k8s.replicationcontroller.desired_pods`. +Deprecated: Replaced by `k8s.replicationcontroller.pod.desired`. """ def create_k8s_replication_controller_desired_pods( meter: Meter, ) -> UpDownCounter: - """Deprecated, use `k8s.replicationcontroller.desired_pods` instead""" + """Deprecated, use `k8s.replicationcontroller.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_REPLICATION_CONTROLLER_DESIRED_PODS, - description="Deprecated, use `k8s.replicationcontroller.desired_pods` instead.", + description="Deprecated, use `k8s.replicationcontroller.pod.desired` instead.", unit="{pod}", ) @@ -1336,6 +1909,44 @@ def create_k8s_replication_controller_desired_pods( "k8s.replicationcontroller.available_pods" ) """ +Deprecated: Replaced by `k8s.replicationcontroller.pod.available`. 
+""" + + +def create_k8s_replicationcontroller_available_pods( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `k8s.replicationcontroller.pod.available` instead""" + return meter.create_up_down_counter( + name=K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS, + description="Deprecated, use `k8s.replicationcontroller.pod.available` instead.", + unit="{pod}", + ) + + +K8S_REPLICATIONCONTROLLER_DESIRED_PODS: Final = ( + "k8s.replicationcontroller.desired_pods" +) +""" +Deprecated: Replaced by `k8s.replicationcontroller.pod.desired`. +""" + + +def create_k8s_replicationcontroller_desired_pods( + meter: Meter, +) -> UpDownCounter: + """Deprecated, use `k8s.replicationcontroller.pod.desired` instead""" + return meter.create_up_down_counter( + name=K8S_REPLICATIONCONTROLLER_DESIRED_PODS, + description="Deprecated, use `k8s.replicationcontroller.pod.desired` instead.", + unit="{pod}", + ) + + +K8S_REPLICATIONCONTROLLER_POD_AVAILABLE: Final = ( + "k8s.replicationcontroller.pod.available" +) +""" Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller Instrument: updowncounter Unit: {pod} @@ -1344,19 +1955,19 @@ def create_k8s_replication_controller_desired_pods( """ -def create_k8s_replicationcontroller_available_pods( +def create_k8s_replicationcontroller_pod_available( meter: Meter, ) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller""" return meter.create_up_down_counter( - name=K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS, + name=K8S_REPLICATIONCONTROLLER_POD_AVAILABLE, description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller.", unit="{pod}", ) -K8S_REPLICATIONCONTROLLER_DESIRED_PODS: Final = ( - "k8s.replicationcontroller.desired_pods" +K8S_REPLICATIONCONTROLLER_POD_DESIRED: Final = ( + "k8s.replicationcontroller.pod.desired" ) """ Number of desired 
replica pods in this replication controller @@ -1367,12 +1978,12 @@ def create_k8s_replicationcontroller_available_pods( """ -def create_k8s_replicationcontroller_desired_pods( +def create_k8s_replicationcontroller_pod_desired( meter: Meter, ) -> UpDownCounter: """Number of desired replica pods in this replication controller""" return meter.create_up_down_counter( - name=K8S_REPLICATIONCONTROLLER_DESIRED_PODS, + name=K8S_REPLICATIONCONTROLLER_POD_DESIRED, description="Number of desired replica pods in this replication controller.", unit="{pod}", ) @@ -1872,6 +2483,36 @@ def create_k8s_resourcequota_storage_request_used( K8S_STATEFULSET_CURRENT_PODS: Final = "k8s.statefulset.current_pods" """ +Deprecated: Replaced by `k8s.statefulset.pod.current`. +""" + + +def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.statefulset.pod.current` instead""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_CURRENT_PODS, + description="Deprecated, use `k8s.statefulset.pod.current` instead.", + unit="{pod}", + ) + + +K8S_STATEFULSET_DESIRED_PODS: Final = "k8s.statefulset.desired_pods" +""" +Deprecated: Replaced by `k8s.statefulset.pod.desired`. 
+""" + + +def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.statefulset.pod.desired` instead""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_DESIRED_PODS, + description="Deprecated, use `k8s.statefulset.pod.desired` instead.", + unit="{pod}", + ) + + +K8S_STATEFULSET_POD_CURRENT: Final = "k8s.statefulset.pod.current" +""" The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision Instrument: updowncounter Unit: {pod} @@ -1880,16 +2521,16 @@ def create_k8s_resourcequota_storage_request_used( """ -def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter: +def create_k8s_statefulset_pod_current(meter: Meter) -> UpDownCounter: """The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision""" return meter.create_up_down_counter( - name=K8S_STATEFULSET_CURRENT_PODS, + name=K8S_STATEFULSET_POD_CURRENT, description="The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision.", unit="{pod}", ) -K8S_STATEFULSET_DESIRED_PODS: Final = "k8s.statefulset.desired_pods" +K8S_STATEFULSET_POD_DESIRED: Final = "k8s.statefulset.pod.desired" """ Number of desired replica pods in this statefulset Instrument: updowncounter @@ -1899,16 +2540,16 @@ def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter: +def create_k8s_statefulset_pod_desired(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this statefulset""" return meter.create_up_down_counter( - name=K8S_STATEFULSET_DESIRED_PODS, + name=K8S_STATEFULSET_POD_DESIRED, description="Number of desired replica pods in this statefulset.", unit="{pod}", ) -K8S_STATEFULSET_READY_PODS: Final = "k8s.statefulset.ready_pods" +K8S_STATEFULSET_POD_READY: Final = 
"k8s.statefulset.pod.ready" """ The number of replica pods created for this statefulset with a Ready Condition Instrument: updowncounter @@ -1918,16 +2559,16 @@ def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter: +def create_k8s_statefulset_pod_ready(meter: Meter) -> UpDownCounter: """The number of replica pods created for this statefulset with a Ready Condition""" return meter.create_up_down_counter( - name=K8S_STATEFULSET_READY_PODS, + name=K8S_STATEFULSET_POD_READY, description="The number of replica pods created for this statefulset with a Ready Condition.", unit="{pod}", ) -K8S_STATEFULSET_UPDATED_PODS: Final = "k8s.statefulset.updated_pods" +K8S_STATEFULSET_POD_UPDATED: Final = "k8s.statefulset.pod.updated" """ Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision Instrument: updowncounter @@ -1937,10 +2578,40 @@ def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter: """ -def create_k8s_statefulset_updated_pods(meter: Meter) -> UpDownCounter: +def create_k8s_statefulset_pod_updated(meter: Meter) -> UpDownCounter: """Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision""" return meter.create_up_down_counter( - name=K8S_STATEFULSET_UPDATED_PODS, + name=K8S_STATEFULSET_POD_UPDATED, description="Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision.", unit="{pod}", ) + + +K8S_STATEFULSET_READY_PODS: Final = "k8s.statefulset.ready_pods" +""" +Deprecated: Replaced by `k8s.statefulset.pod.ready`. 
+""" + + +def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.statefulset.pod.ready` instead""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_READY_PODS, + description="Deprecated, use `k8s.statefulset.pod.ready` instead.", + unit="{pod}", + ) + + +K8S_STATEFULSET_UPDATED_PODS: Final = "k8s.statefulset.updated_pods" +""" +Deprecated: Replaced by `k8s.statefulset.pod.updated`. +""" + + +def create_k8s_statefulset_updated_pods(meter: Meter) -> UpDownCounter: + """Deprecated, use `k8s.statefulset.pod.updated` instead""" + return meter.create_up_down_counter( + name=K8S_STATEFULSET_UPDATED_PODS, + description="Deprecated, use `k8s.statefulset.pod.updated` instead.", + unit="{pod}", + ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/nfs_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/nfs_metrics.py new file mode 100644 index 00000000000..e23b049ed9f --- /dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/nfs_metrics.py @@ -0,0 +1,305 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import Final + +from opentelemetry.metrics import Counter, Meter, UpDownCounter + +NFS_CLIENT_NET_COUNT: Final = "nfs.client.net.count" +""" +Reports the count of kernel NFS client TCP segments and UDP datagrams handled +Instrument: counter +Unit: {record} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.netudpcnt and svc_stat.nettcpcnt. +""" + + +def create_nfs_client_net_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS client TCP segments and UDP datagrams handled""" + return meter.create_counter( + name=NFS_CLIENT_NET_COUNT, + description="Reports the count of kernel NFS client TCP segments and UDP datagrams handled.", + unit="{record}", + ) + + +NFS_CLIENT_NET_TCP_CONNECTION_ACCEPTED: Final = ( + "nfs.client.net.tcp.connection.accepted" +) +""" +Reports the count of kernel NFS client TCP connections accepted +Instrument: counter +Unit: {connection} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.nettcpconn. +""" + + +def create_nfs_client_net_tcp_connection_accepted(meter: Meter) -> Counter: + """Reports the count of kernel NFS client TCP connections accepted""" + return meter.create_counter( + name=NFS_CLIENT_NET_TCP_CONNECTION_ACCEPTED, + description="Reports the count of kernel NFS client TCP connections accepted.", + unit="{connection}", + ) + + +NFS_CLIENT_OPERATION_COUNT: Final = "nfs.client.operation.count" +""" +Reports the count of kernel NFSv4+ client operations +Instrument: counter +Unit: {operation} +""" + + +def create_nfs_client_operation_count(meter: Meter) -> Counter: + """Reports the count of kernel NFSv4+ client operations""" + return meter.create_counter( + name=NFS_CLIENT_OPERATION_COUNT, + description="Reports the count of kernel NFSv4+ client operations.", + unit="{operation}", + ) + + +NFS_CLIENT_PROCEDURE_COUNT: Final = "nfs.client.procedure.count" +""" +Reports the count of kernel NFS client procedures +Instrument: counter +Unit: {procedure} +""" + + +def 
create_nfs_client_procedure_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS client procedures""" + return meter.create_counter( + name=NFS_CLIENT_PROCEDURE_COUNT, + description="Reports the count of kernel NFS client procedures.", + unit="{procedure}", + ) + + +NFS_CLIENT_RPC_AUTHREFRESH_COUNT: Final = "nfs.client.rpc.authrefresh.count" +""" +Reports the count of kernel NFS client RPC authentication refreshes +Instrument: counter +Unit: {authrefresh} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpcauthrefresh. +""" + + +def create_nfs_client_rpc_authrefresh_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS client RPC authentication refreshes""" + return meter.create_counter( + name=NFS_CLIENT_RPC_AUTHREFRESH_COUNT, + description="Reports the count of kernel NFS client RPC authentication refreshes.", + unit="{authrefresh}", + ) + + +NFS_CLIENT_RPC_COUNT: Final = "nfs.client.rpc.count" +""" +Reports the count of kernel NFS client RPCs sent, regardless of whether they're accepted/rejected by the server +Instrument: counter +Unit: {request} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpccnt. +""" + + +def create_nfs_client_rpc_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS client RPCs sent, regardless of whether they're accepted/rejected by the server""" + return meter.create_counter( + name=NFS_CLIENT_RPC_COUNT, + description="Reports the count of kernel NFS client RPCs sent, regardless of whether they're accepted/rejected by the server.", + unit="{request}", + ) + + +NFS_CLIENT_RPC_RETRANSMIT_COUNT: Final = "nfs.client.rpc.retransmit.count" +""" +Reports the count of kernel NFS client RPC retransmits +Instrument: counter +Unit: {retransmit} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpcretrans. 
+""" + + +def create_nfs_client_rpc_retransmit_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS client RPC retransmits""" + return meter.create_counter( + name=NFS_CLIENT_RPC_RETRANSMIT_COUNT, + description="Reports the count of kernel NFS client RPC retransmits.", + unit="{retransmit}", + ) + + +NFS_SERVER_FH_STALE_COUNT: Final = "nfs.server.fh.stale.count" +""" +Reports the count of kernel NFS server stale file handles +Instrument: counter +Unit: {fh} +Note: Linux: this metric is taken from the Linux kernel NFSD_STATS_FH_STALE counter in the nfsd_net struct. +""" + + +def create_nfs_server_fh_stale_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS server stale file handles""" + return meter.create_counter( + name=NFS_SERVER_FH_STALE_COUNT, + description="Reports the count of kernel NFS server stale file handles.", + unit="{fh}", + ) + + +NFS_SERVER_IO: Final = "nfs.server.io" +""" +Reports the count of kernel NFS server bytes returned to receive and transmit (read and write) requests +Instrument: counter +Unit: By +Note: Linux: this metric is taken from the Linux kernel NFSD_STATS_IO_READ and NFSD_STATS_IO_WRITE counters in the nfsd_net struct. +""" + + +def create_nfs_server_io(meter: Meter) -> Counter: + """Reports the count of kernel NFS server bytes returned to receive and transmit (read and write) requests""" + return meter.create_counter( + name=NFS_SERVER_IO, + description="Reports the count of kernel NFS server bytes returned to receive and transmit (read and write) requests.", + unit="By", + ) + + +NFS_SERVER_NET_COUNT: Final = "nfs.server.net.count" +""" +Reports the count of kernel NFS server TCP segments and UDP datagrams handled +Instrument: counter +Unit: {record} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.nettcpcnt and svc_stat.netudpcnt. 
+""" + + +def create_nfs_server_net_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS server TCP segments and UDP datagrams handled""" + return meter.create_counter( + name=NFS_SERVER_NET_COUNT, + description="Reports the count of kernel NFS server TCP segments and UDP datagrams handled.", + unit="{record}", + ) + + +NFS_SERVER_NET_TCP_CONNECTION_ACCEPTED: Final = ( + "nfs.server.net.tcp.connection.accepted" +) +""" +Reports the count of kernel NFS server TCP connections accepted +Instrument: counter +Unit: {connection} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.nettcpconn. +""" + + +def create_nfs_server_net_tcp_connection_accepted(meter: Meter) -> Counter: + """Reports the count of kernel NFS server TCP connections accepted""" + return meter.create_counter( + name=NFS_SERVER_NET_TCP_CONNECTION_ACCEPTED, + description="Reports the count of kernel NFS server TCP connections accepted.", + unit="{connection}", + ) + + +NFS_SERVER_OPERATION_COUNT: Final = "nfs.server.operation.count" +""" +Reports the count of kernel NFSv4+ server operations +Instrument: counter +Unit: {operation} +""" + + +def create_nfs_server_operation_count(meter: Meter) -> Counter: + """Reports the count of kernel NFSv4+ server operations""" + return meter.create_counter( + name=NFS_SERVER_OPERATION_COUNT, + description="Reports the count of kernel NFSv4+ server operations.", + unit="{operation}", + ) + + +NFS_SERVER_PROCEDURE_COUNT: Final = "nfs.server.procedure.count" +""" +Reports the count of kernel NFS server procedures +Instrument: counter +Unit: {procedure} +""" + + +def create_nfs_server_procedure_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS server procedures""" + return meter.create_counter( + name=NFS_SERVER_PROCEDURE_COUNT, + description="Reports the count of kernel NFS server procedures.", + unit="{procedure}", + ) + + +NFS_SERVER_REPCACHE_REQUESTS: Final = "nfs.server.repcache.requests" +""" +Reports the kernel NFS 
server reply cache request count by cache hit status +Instrument: counter +Unit: {request} +""" + + +def create_nfs_server_repcache_requests(meter: Meter) -> Counter: + """Reports the kernel NFS server reply cache request count by cache hit status""" + return meter.create_counter( + name=NFS_SERVER_REPCACHE_REQUESTS, + description="Reports the kernel NFS server reply cache request count by cache hit status.", + unit="{request}", + ) + + +NFS_SERVER_RPC_COUNT: Final = "nfs.server.rpc.count" +""" +Reports the count of kernel NFS server RPCs handled +Instrument: counter +Unit: {request} +Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpccnt, the count of good RPCs. This metric can have +an error.type of "format", "auth", or "client" for svc_stat.badfmt, svc_stat.badauth, and svc_stat.badclnt. +""" + + +def create_nfs_server_rpc_count(meter: Meter) -> Counter: + """Reports the count of kernel NFS server RPCs handled""" + return meter.create_counter( + name=NFS_SERVER_RPC_COUNT, + description="Reports the count of kernel NFS server RPCs handled.", + unit="{request}", + ) + + +NFS_SERVER_THREAD_COUNT: Final = "nfs.server.thread.count" +""" +Reports the count of kernel NFS server available threads +Instrument: updowncounter +Unit: {thread} +Note: Linux: this metric is taken from the Linux kernel nfsd_th_cnt variable. 
+""" + + +def create_nfs_server_thread_count(meter: Meter) -> UpDownCounter: + """Reports the count of kernel NFS server available threads""" + return meter.create_up_down_counter( + name=NFS_SERVER_THREAD_COUNT, + description="Reports the count of kernel NFS server available threads.", + unit="{thread}", + ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/openshift_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/openshift_metrics.py new file mode 100644 index 00000000000..08f6343e73f --- /dev/null +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/openshift_metrics.py @@ -0,0 +1,529 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Final + +from opentelemetry.metrics import Meter, UpDownCounter + +OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_HARD: Final = ( + "openshift.clusterquota.cpu.limit.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: {cpu} +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_cpu_limit_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="{cpu}", + ) + + +OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_USED: Final = ( + "openshift.clusterquota.cpu.limit.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: {cpu} +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). +""" + + +def create_openshift_clusterquota_cpu_limit_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_USED, + description="The current observed total usage of the resource across all projects.", + unit="{cpu}", + ) + + +OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_HARD: Final = ( + "openshift.clusterquota.cpu.request.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: {cpu} +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_cpu_request_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="{cpu}", + ) + + +OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_USED: Final = ( + "openshift.clusterquota.cpu.request.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: {cpu} +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). +""" + + +def create_openshift_clusterquota_cpu_request_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_USED, + description="The current observed total usage of the resource across all projects.", + unit="{cpu}", + ) + + +OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD: Final = ( + "openshift.clusterquota.ephemeral_storage.limit.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_ephemeral_storage_limit_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_USED: Final = ( + "openshift.clusterquota.ephemeral_storage.limit.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_ephemeral_storage_limit_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_USED, + description="The current observed total usage of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD: Final = ( + "openshift.clusterquota.ephemeral_storage.request.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_ephemeral_storage_request_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_USED: Final = ( + "openshift.clusterquota.ephemeral_storage.request.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_ephemeral_storage_request_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_USED, + description="The current observed total usage of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_HARD: Final = ( + "openshift.clusterquota.hugepage_count.request.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: {hugepage} +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_hugepage_count_request_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="{hugepage}", + ) + + +OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_USED: Final = ( + "openshift.clusterquota.hugepage_count.request.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: {hugepage} +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_hugepage_count_request_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_USED, + description="The current observed total usage of the resource across all projects.", + unit="{hugepage}", + ) + + +OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_HARD: Final = ( + "openshift.clusterquota.memory.limit.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). +""" + + +def create_openshift_clusterquota_memory_limit_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_USED: Final = ( + "openshift.clusterquota.memory.limit.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_memory_limit_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_USED, + description="The current observed total usage of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_HARD: Final = ( + "openshift.clusterquota.memory.request.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). +""" + + +def create_openshift_clusterquota_memory_request_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_USED: Final = ( + "openshift.clusterquota.memory.request.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_memory_request_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_USED, + description="The current observed total usage of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_HARD: Final = ( + "openshift.clusterquota.object_count.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: {object} +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). +""" + + +def create_openshift_clusterquota_object_count_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="{object}", + ) + + +OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_USED: Final = ( + "openshift.clusterquota.object_count.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: {object} +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
+""" + + +def create_openshift_clusterquota_object_count_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_USED, + description="The current observed total usage of the resource across all projects.", + unit="{object}", + ) + + +OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD: Final = ( + "openshift.clusterquota.persistentvolumeclaim_count.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: {persistentvolumeclaim} +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). + +The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +storage class. 
+""" + + +def create_openshift_clusterquota_persistentvolumeclaim_count_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="{persistentvolumeclaim}", + ) + + +OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED: Final = ( + "openshift.clusterquota.persistentvolumeclaim_count.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: {persistentvolumeclaim} +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). + +The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +storage class. 
+""" + + +def create_openshift_clusterquota_persistentvolumeclaim_count_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED, + description="The current observed total usage of the resource across all projects.", + unit="{persistentvolumeclaim}", + ) + + +OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_HARD: Final = ( + "openshift.clusterquota.storage.request.hard" +) +""" +The enforced hard limit of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Hard` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). + +The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +storage class. 
+""" + + +def create_openshift_clusterquota_storage_request_hard( + meter: Meter, +) -> UpDownCounter: + """The enforced hard limit of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_HARD, + description="The enforced hard limit of the resource across all projects.", + unit="By", + ) + + +OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_USED: Final = ( + "openshift.clusterquota.storage.request.used" +) +""" +The current observed total usage of the resource across all projects +Instrument: updowncounter +Unit: By +Note: This metric is retrieved from the `Status.Total.Used` field of the +[K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) +of the +[ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). + +The `k8s.storageclass.name` should be required when a resource quota is defined for a specific +storage class. 
+""" + + +def create_openshift_clusterquota_storage_request_used( + meter: Meter, +) -> UpDownCounter: + """The current observed total usage of the resource across all projects""" + return meter.create_up_down_counter( + name=OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_USED, + description="The current observed total usage of the resource across all projects.", + unit="By", + ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py index e3f4ad6edd8..5e3319d5e96 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/rpc_metrics.py @@ -58,12 +58,7 @@ def create_rpc_client_request_size(meter: Meter) -> Histogram: RPC_CLIENT_REQUESTS_PER_RPC: Final = "rpc.client.requests_per_rpc" """ -Measures the number of messages received per RPC -Instrument: histogram -Unit: {count} -Note: Should be 1 for all non-streaming RPCs. - -**Streaming**: This metric is required for server and client streaming RPCs. +Deprecated: Removed, no replacement at this time. """ @@ -96,12 +91,7 @@ def create_rpc_client_response_size(meter: Meter) -> Histogram: RPC_CLIENT_RESPONSES_PER_RPC: Final = "rpc.client.responses_per_rpc" """ -Measures the number of messages sent per RPC -Instrument: histogram -Unit: {count} -Note: Should be 1 for all non-streaming RPCs. - -**Streaming**: This metric is required for server and client streaming RPCs. +Deprecated: Removed, no replacement at this time. """ @@ -155,12 +145,7 @@ def create_rpc_server_request_size(meter: Meter) -> Histogram: RPC_SERVER_REQUESTS_PER_RPC: Final = "rpc.server.requests_per_rpc" """ -Measures the number of messages received per RPC -Instrument: histogram -Unit: {count} -Note: Should be 1 for all non-streaming RPCs. 
- -**Streaming** : This metric is required for server and client streaming RPCs. +Deprecated: Removed, no replacement at this time. """ @@ -193,12 +178,7 @@ def create_rpc_server_response_size(meter: Meter) -> Histogram: RPC_SERVER_RESPONSES_PER_RPC: Final = "rpc.server.responses_per_rpc" """ -Measures the number of messages sent per RPC -Instrument: histogram -Unit: {count} -Note: Should be 1 for all non-streaming RPCs. - -**Streaming**: This metric is required for server and client streaming RPCs. +Deprecated: Removed, no replacement at this time. """ diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py index 0bdec11b367..e4d3cfdffe0 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics/system_metrics.py @@ -133,17 +133,17 @@ def create_system_cpu_utilization( SYSTEM_DISK_IO: Final = "system.disk.io" """ -TODO +Disk bytes transferred Instrument: counter Unit: By """ def create_system_disk_io(meter: Meter) -> Counter: - """TODO""" + """Disk bytes transferred""" return meter.create_counter( name=SYSTEM_DISK_IO, - description="TODO.", + description="Disk bytes transferred.", unit="By", ) @@ -190,17 +190,17 @@ def create_system_disk_limit(meter: Meter) -> UpDownCounter: SYSTEM_DISK_MERGED: Final = "system.disk.merged" """ -TODO +The number of disk reads/writes merged into single physical disk access operations Instrument: counter Unit: {operation} """ def create_system_disk_merged(meter: Meter) -> Counter: - """TODO""" + """The number of disk reads/writes merged into single physical disk access operations""" return meter.create_counter( name=SYSTEM_DISK_MERGED, - description="TODO.", + description="The number of disk reads/writes merged into single physical disk 
access operations.", unit="{operation}", ) @@ -228,17 +228,17 @@ def create_system_disk_operation_time(meter: Meter) -> Counter: SYSTEM_DISK_OPERATIONS: Final = "system.disk.operations" """ -TODO +Disk operations count Instrument: counter Unit: {operation} """ def create_system_disk_operations(meter: Meter) -> Counter: - """TODO""" + """Disk operations count""" return meter.create_counter( name=SYSTEM_DISK_OPERATIONS, - description="TODO.", + description="Disk operations count.", unit="{operation}", ) @@ -281,7 +281,7 @@ def create_system_filesystem_usage(meter: Meter) -> UpDownCounter: SYSTEM_FILESYSTEM_UTILIZATION: Final = "system.filesystem.utilization" """ -TODO +Fraction of filesystem bytes used Instrument: gauge Unit: 1 """ @@ -290,11 +290,11 @@ def create_system_filesystem_usage(meter: Meter) -> UpDownCounter: def create_system_filesystem_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: - """TODO""" + """Fraction of filesystem bytes used""" return meter.create_observable_gauge( name=SYSTEM_FILESYSTEM_UTILIZATION, callbacks=callbacks, - description="TODO.", + description="Fraction of filesystem bytes used.", unit="1", ) @@ -396,7 +396,7 @@ def create_system_memory_usage(meter: Meter) -> UpDownCounter: SYSTEM_MEMORY_UTILIZATION: Final = "system.memory.utilization" """ -TODO +Percentage of memory bytes in use Instrument: gauge Unit: 1 """ @@ -405,28 +405,28 @@ def create_system_memory_usage(meter: Meter) -> UpDownCounter: def create_system_memory_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: - """TODO""" + """Percentage of memory bytes in use""" return meter.create_observable_gauge( name=SYSTEM_MEMORY_UTILIZATION, callbacks=callbacks, - description="TODO.", + description="Percentage of memory bytes in use.", unit="1", ) SYSTEM_NETWORK_CONNECTION_COUNT: Final = "system.network.connection.count" """ -TODO +The number of connections Instrument: updowncounter Unit: {connection} """ 
def create_system_network_connection_count(meter: Meter) -> UpDownCounter: - """TODO""" + """The number of connections""" return meter.create_up_down_counter( name=SYSTEM_NETWORK_CONNECTION_COUNT, - description="TODO.", + description="The number of connections.", unit="{connection}", ) @@ -446,6 +446,21 @@ def create_system_network_connections(meter: Meter) -> UpDownCounter: ) +SYSTEM_NETWORK_DROPPED: Final = "system.network.dropped" +""" +Deprecated: Replaced by `system.network.packet.dropped`. +""" + + +def create_system_network_dropped(meter: Meter) -> Counter: + """Count of packets that are dropped or discarded even though there was no error""" + return meter.create_counter( + name=SYSTEM_NETWORK_DROPPED, + description="Count of packets that are dropped or discarded even though there was no error.", + unit="{packet}", + ) + + SYSTEM_NETWORK_ERRORS: Final = "system.network.errors" """ Count of network errors detected @@ -470,34 +485,34 @@ def create_system_network_errors(meter: Meter) -> Counter: SYSTEM_NETWORK_IO: Final = "system.network.io" """ -TODO +The number of bytes transmitted and received Instrument: counter Unit: By """ def create_system_network_io(meter: Meter) -> Counter: - """TODO""" + """The number of bytes transmitted and received""" return meter.create_counter( name=SYSTEM_NETWORK_IO, - description="TODO.", + description="The number of bytes transmitted and received.", unit="By", ) SYSTEM_NETWORK_PACKET_COUNT: Final = "system.network.packet.count" """ -TODO +The number of packets transferred Instrument: counter Unit: {packet} """ def create_system_network_packet_count(meter: Meter) -> Counter: - """TODO""" + """The number of packets transferred""" return meter.create_counter( name=SYSTEM_NETWORK_PACKET_COUNT, - description="TODO.", + description="The number of packets transferred.", unit="{packet}", ) @@ -524,36 +539,51 @@ def create_system_network_packet_dropped(meter: Meter) -> Counter: ) +SYSTEM_NETWORK_PACKETS: Final = 
"system.network.packets" +""" +Deprecated: Replaced by `system.network.packet.count`. +""" + + +def create_system_network_packets(meter: Meter) -> Counter: + """The number of packets transferred""" + return meter.create_counter( + name=SYSTEM_NETWORK_PACKETS, + description="The number of packets transferred.", + unit="{packet}", + ) + + SYSTEM_PAGING_FAULTS: Final = "system.paging.faults" """ -TODO +The number of page faults Instrument: counter Unit: {fault} """ def create_system_paging_faults(meter: Meter) -> Counter: - """TODO""" + """The number of page faults""" return meter.create_counter( name=SYSTEM_PAGING_FAULTS, - description="TODO.", + description="The number of page faults.", unit="{fault}", ) SYSTEM_PAGING_OPERATIONS: Final = "system.paging.operations" """ -TODO +The number of paging operations Instrument: counter Unit: {operation} """ def create_system_paging_operations(meter: Meter) -> Counter: - """TODO""" + """The number of paging operations""" return meter.create_counter( name=SYSTEM_PAGING_OPERATIONS, - description="TODO.", + description="The number of paging operations.", unit="{operation}", ) @@ -577,7 +607,7 @@ def create_system_paging_usage(meter: Meter) -> UpDownCounter: SYSTEM_PAGING_UTILIZATION: Final = "system.paging.utilization" """ -TODO +Swap (unix) or pagefile (windows) utilization Instrument: gauge Unit: 1 """ @@ -586,11 +616,11 @@ def create_system_paging_usage(meter: Meter) -> UpDownCounter: def create_system_paging_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: - """TODO""" + """Swap (unix) or pagefile (windows) utilization""" return meter.create_observable_gauge( name=SYSTEM_PAGING_UTILIZATION, callbacks=callbacks, - description="TODO.", + description="Swap (unix) or pagefile (windows) utilization.", unit="1", ) diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes/http_attributes.py 
b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes/http_attributes.py index fb14068bbf1..8f7e7158c5d 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes/http_attributes.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes/http_attributes.py @@ -40,8 +40,9 @@ """ HTTP request method. Note: HTTP request method value SHOULD be "known" to the instrumentation. -By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) -and the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). +By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods), +the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html) +and the QUERY method defined in [httpbis-safe-method-w-body](https://datatracker.ietf.org/doc/draft-ietf-httpbis-safe-method-w-body/?include_text=1). If the HTTP request method is not known to instrumentation, it MUST set the `http.request.method` attribute to `_OTHER`. @@ -93,9 +94,17 @@ HTTP_ROUTE: Final = "http.route" """ -The matched route, that is, the path template in the format used by the respective server framework. +The matched route template for the request. This MUST be low-cardinality and include all static path segments, with dynamic path segments represented with placeholders. Note: MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. SHOULD include the [application root](/docs/http/http-spans.md#http-server-definitions) if there is one. + +A static path segment is a part of the route template with a fixed, low-cardinality value. 
This includes literal strings like `/users/` and placeholders that +are constrained to a finite, predefined set of values, e.g. `{controller}` or `{action}`. + +A dynamic path segment is a placeholder for a value that can have high cardinality and is not constrained to a predefined list like static path segments. + +Instrumentations SHOULD use routing information provided by the corresponding web framework. They SHOULD pick the most precise source of routing information and MAY +support custom route formatting. Instrumentations SHOULD document the format and the API used to obtain the route string. """ diff --git a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py index 6adaa8c89a4..fdee4d8a9e0 100644 --- a/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py +++ b/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py @@ -85,5 +85,10 @@ class Schemas(Enum): The URL of the OpenTelemetry schema version 1.37.0. """ + V1_38_0 = "https://opentelemetry.io/schemas/1.38.0" + """ + The URL of the OpenTelemetry schema version 1.38.0. + """ + # when generating new semantic conventions, # make sure to add new versions version here. diff --git a/scripts/semconv/generate.sh b/scripts/semconv/generate.sh index cc5f7bfec38..41f6406becb 100755 --- a/scripts/semconv/generate.sh +++ b/scripts/semconv/generate.sh @@ -5,9 +5,9 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" ROOT_DIR="${SCRIPT_DIR}/../.." # freeze the spec version to make SemanticAttributes generation reproducible -SEMCONV_VERSION=1.37.0 +SEMCONV_VERSION=1.38.0 SEMCONV_VERSION_TAG=v$SEMCONV_VERSION -OTEL_WEAVER_IMG_VERSION=v0.17.0 +OTEL_WEAVER_IMG_VERSION=v0.18.0 INCUBATING_DIR=_incubating cd ${SCRIPT_DIR}