diff --git a/api/build.gradle b/api/build.gradle
index 7ac4b9744..582ed0ed4 100644
--- a/api/build.gradle
+++ b/api/build.gradle
@@ -34,6 +34,7 @@ dependencies {
     implementation libs.apache.avro
     implementation libs.apache.commons
+    implementation libs.apache.commons.text
    implementation libs.apache.commons.pool2
    implementation libs.apache.datasketches
@@ -81,6 +82,13 @@ dependencies {
        because("CVE Fix: It is excluded above because of a vulnerability")
    }
+    implementation libs.prometheus.metrics.core
+    implementation libs.prometheus.metrics.textformats
+    implementation (libs.prometheus.metrics.exporter.pushgateway) {
+        exclude group: 'com.google.protobuf', module: 'protobuf-java' because("PushGW lib pulls protobuf-java 4.x, which is incompatible with protobuf-java 3.x used by various dependencies of this project.")
+    }
+    implementation libs.snappy
+
    // Annotation processors
    implementation libs.lombok
    implementation libs.mapstruct
@@ -107,11 +115,11 @@ dependencies {
    testImplementation libs.okhttp3
    testImplementation libs.okhttp3.mockwebserver
+    testImplementation libs.prometheus.metrics.core
 }
 
 generateGrammarSource {
    maxHeapSize = "64m"
-    arguments += ["-package", "ksql"]
 }
 
 tasks.withType(JavaCompile) {
@@ -133,6 +141,7 @@ sourceSets {
 
 tasks.withType(Checkstyle).configureEach {
    exclude '**/ksql/**'
+    exclude '**/promql/**'
 }
 
 checkstyle {
diff --git a/api/src/main/antlr/ksql/KsqlGrammar.g4 b/api/src/main/antlr/ksql/KsqlGrammar.g4
index 2fcd623e3..e46da04ba 100644
--- a/api/src/main/antlr/ksql/KsqlGrammar.g4
+++ b/api/src/main/antlr/ksql/KsqlGrammar.g4
@@ -1,5 +1,8 @@
 grammar KsqlGrammar;
 
+@header {package ksql;}
+
+
 tokens {
    DELIMITER
 }
diff --git a/api/src/main/antlr/promql/PromQL.g4 b/api/src/main/antlr/promql/PromQL.g4
new file mode 100644
index 000000000..a405f9ce8
--- /dev/null
+++ b/api/src/main/antlr/promql/PromQL.g4
@@ -0,0 +1,287 @@
+grammar PromQL;
+
+@header {package promql;}
+
+options {
+    caseInsensitive = true;
+}
+
+expression: vectorOperation EOF;
+
+// Binary operations are ordered by precedence
+
+// Unary operations have the same precedence as multiplications
+
+vectorOperation
+    : vectorOperation powOp vectorOperation
+    | vectorOperation subqueryOp
+    | unaryOp vectorOperation
+    | vectorOperation multOp vectorOperation
+    | vectorOperation addOp vectorOperation
+    | vectorOperation compareOp vectorOperation
+    | vectorOperation andUnlessOp vectorOperation
+    | vectorOperation orOp vectorOperation
+    | vectorOperation vectorMatchOp vectorOperation
+    | vectorOperation AT vectorOperation
+    | vector
+    ;
+
+// Operators
+
+unaryOp: (ADD | SUB);
+powOp: POW grouping?;
+multOp: (MULT | DIV | MOD) grouping?;
+addOp: (ADD | SUB) grouping?;
+compareOp: (DEQ | NE | GT | LT | GE | LE) BOOL? grouping?;
+andUnlessOp: (AND | UNLESS) grouping?;
+orOp: OR grouping?;
+vectorMatchOp: (ON | UNLESS) grouping?;
+subqueryOp: SUBQUERY_RANGE offsetOp?;
+offsetOp: OFFSET DURATION;
+
+vector
+    : function_
+    | aggregation
+    | instantSelector
+    | matrixSelector
+    | offset
+    | literal
+    | parens
+    ;
+
+parens: LEFT_PAREN vectorOperation RIGHT_PAREN;
+
+// Selectors
+
+instantSelector
+    : METRIC_NAME (LEFT_BRACE labelMatcherList? RIGHT_BRACE)?
+    | LEFT_BRACE labelMatcherList RIGHT_BRACE
+    ;
+
+labelMatcher: labelName labelMatcherOperator STRING;
+labelMatcherOperator: EQ | NE | RE | NRE;
+labelMatcherList: labelMatcher (COMMA labelMatcher)* COMMA?;
+
+matrixSelector: instantSelector TIME_RANGE;
+
+offset
+    : instantSelector OFFSET DURATION
+    | matrixSelector OFFSET DURATION
+    ;
+
+// Functions
+
+function_: FUNCTION LEFT_PAREN (parameter (COMMA parameter)*)? RIGHT_PAREN;
+
+parameter: literal | vectorOperation;
+parameterList: LEFT_PAREN (parameter (COMMA parameter)*)? RIGHT_PAREN;
+
+// Aggregations
+
+aggregation
+    : AGGREGATION_OPERATOR parameterList
+    | AGGREGATION_OPERATOR (by | without) parameterList
+    | AGGREGATION_OPERATOR parameterList ( by | without)
+    ;
+by: BY labelNameList;
+without: WITHOUT labelNameList;
+
+// Vector one-to-one/one-to-many joins
+
+grouping: (on_ | ignoring) (groupLeft | groupRight)?;
+on_: ON labelNameList;
+ignoring: IGNORING labelNameList;
+groupLeft: GROUP_LEFT labelNameList?;
+groupRight: GROUP_RIGHT labelNameList?;
+
+// Label names
+
+labelName: keyword | METRIC_NAME | LABEL_NAME;
+labelNameList: LEFT_PAREN (labelName (COMMA labelName)*)? RIGHT_PAREN;
+
+keyword
+    : AND
+    | OR
+    | UNLESS
+    | BY
+    | WITHOUT
+    | ON
+    | IGNORING
+    | GROUP_LEFT
+    | GROUP_RIGHT
+    | OFFSET
+    | BOOL
+    | AGGREGATION_OPERATOR
+    | FUNCTION
+    ;
+
+literal: NUMBER | STRING;
+
+fragment NUMERAL: [0-9]+ ('.' [0-9]+)?;
+
+fragment SCIENTIFIC_NUMBER
+    : NUMERAL ('e' [-+]? NUMERAL)?
+    ;
+
+NUMBER
+    : NUMERAL
+    | SCIENTIFIC_NUMBER;
+
+STRING
+    : '\'' (~('\'' | '\\') | '\\' .)* '\''
+    | '"' (~('"' | '\\') | '\\' .)* '"'
+    ;
+
+// Binary operators
+
+ADD: '+';
+SUB: '-';
+MULT: '*';
+DIV: '/';
+MOD: '%';
+POW: '^';
+
+AND: 'and';
+OR: 'or';
+UNLESS: 'unless';
+
+// Comparison operators
+
+EQ: '=';
+DEQ: '==';
+NE: '!=';
+GT: '>';
+LT: '<';
+GE: '>=';
+LE: '<=';
+RE: '=~';
+NRE: '!~';
+
+// Aggregation modifiers
+
+BY: 'by';
+WITHOUT: 'without';
+
+// Join modifiers
+
+ON: 'on';
+IGNORING: 'ignoring';
+GROUP_LEFT: 'group_left';
+GROUP_RIGHT: 'group_right';
+
+OFFSET: 'offset';
+
+BOOL: 'bool';
+
+AGGREGATION_OPERATOR
+    : 'sum'
+    | 'min'
+    | 'max'
+    | 'avg'
+    | 'group'
+    | 'stddev'
+    | 'stdvar'
+    | 'count'
+    | 'count_values'
+    | 'bottomk'
+    | 'topk'
+    | 'quantile'
+    ;
+
+FUNCTION
+    : 'abs'
+    | 'absent'
+    | 'absent_over_time'
+    | 'ceil'
+    | 'changes'
+    | 'clamp_max'
+    | 'clamp_min'
+    | 'day_of_month'
+    | 'day_of_week'
+    | 'days_in_month'
+    | 'delta'
+    | 'deriv'
+    | 'exp'
+    | 'floor'
+    | 'histogram_quantile'
+    | 'holt_winters'
+    | 'hour'
+    | 'idelta'
+    | 'increase'
+    | 'irate'
+    | 'label_join'
+    | 'label_replace'
+    | 'ln'
+    | 'log2'
+    | 'log10'
+    | 'minute'
+    | 'month'
+    | 'predict_linear'
+    | 'rate'
+    | 'resets'
+    | 'round'
+    | 'scalar'
+    | 'sort'
+    | 'sort_desc'
+    | 'sqrt'
+    | 'time'
+    | 'timestamp'
+    | 'vector'
+    | 'year'
+    | 'avg_over_time'
+    | 'min_over_time'
+    | 'max_over_time'
+    | 'sum_over_time'
+    | 'count_over_time'
+    | 'quantile_over_time'
+    | 'stddev_over_time'
+    | 'stdvar_over_time'
+    | 'last_over_time'
+    | 'acos'
+    | 'acosh'
+    | 'asin'
+    | 'asinh'
+    | 'atan'
+    | 'atanh'
+    | 'cos'
+    | 'cosh'
+    | 'sin'
+    | 'sinh'
+    | 'tan'
+    | 'tanh'
+    | 'deg'
+    | 'pi'
+    | 'rad'
+    ;
+
+LEFT_BRACE: '{';
+RIGHT_BRACE: '}';
+
+LEFT_PAREN: '(';
+RIGHT_PAREN: ')';
+
+LEFT_BRACKET: '[';
+RIGHT_BRACKET: ']';
+
+COMMA: ',';
+
+AT: '@';
+
+SUBQUERY_RANGE
+    : LEFT_BRACKET DURATION ':' DURATION? RIGHT_BRACKET;
+
+TIME_RANGE
+    : LEFT_BRACKET DURATION RIGHT_BRACKET;
+
+// The proper order (longest to the shortest) must be validated after parsing
+DURATION: ([0-9]+ ('ms' | [smhdwy]))+;
+
+METRIC_NAME: [a-z_:] [a-z0-9_:]*;
+LABEL_NAME: [a-z_] [a-z0-9_]*;
+
+
+
+WS: [\r\t\n ]+ -> channel(HIDDEN);
+SL_COMMENT
+    : '#' .*? '\n' -> channel(HIDDEN)
+    ;
diff --git a/api/src/main/java/io/kafbat/ui/client/RetryingKafkaConnectClient.java b/api/src/main/java/io/kafbat/ui/client/RetryingKafkaConnectClient.java
index 30b664da1..8a03c3dee 100644
--- a/api/src/main/java/io/kafbat/ui/client/RetryingKafkaConnectClient.java
+++ b/api/src/main/java/io/kafbat/ui/client/RetryingKafkaConnectClient.java
@@ -1,5 +1,7 @@
 package io.kafbat.ui.client;
 
+import static org.apache.commons.lang3.Strings.CI;
+
 import com.fasterxml.jackson.annotation.JsonProperty;
 import io.kafbat.ui.config.ClustersProperties;
 import io.kafbat.ui.connect.ApiClient;
@@ -22,7 +24,6 @@
 import java.util.Objects;
 import javax.annotation.Nullable;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
 import org.springframework.http.ResponseEntity;
 import org.springframework.util.unit.DataSize;
 import org.springframework.web.client.RestClientException;
@@ -58,7 +59,7 @@ private static Retry conflictCodeRetry() {
        if (e instanceof WebClientResponseException.InternalServerError exception) {
          final var errorMessage = getMessage(exception);
-          return StringUtils.equals(errorMessage,
+          return CI.equals(errorMessage,
              // From https://github.com/apache/kafka/blob/dfc07e0e0c6e737a56a5402644265f634402b864/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java#L2340
              "Request cannot be completed because a rebalance is expected");
        }
diff --git a/api/src/main/java/io/kafbat/ui/config/ClustersProperties.java b/api/src/main/java/io/kafbat/ui/config/ClustersProperties.java
index b58ef1543..8d5be375a 100644
--- a/api/src/main/java/io/kafbat/ui/config/ClustersProperties.java
+++ b/api/src/main/java/io/kafbat/ui/config/ClustersProperties.java
@@ -1,6 +1,7 @@
 package io.kafbat.ui.config;
 
-import io.kafbat.ui.model.MetricsConfig;
+import static io.kafbat.ui.model.MetricsScrapeProperties.JMX_METRICS_TYPE;
+
 import jakarta.annotation.PostConstruct;
 import jakarta.validation.Valid;
 import jakarta.validation.constraints.NotBlank;
@@ -37,6 +38,8 @@ public class ClustersProperties {
 
  PollingProperties polling = new PollingProperties();
 
+  MetricsStorage defaultMetricsStorage = new MetricsStorage();
+
  CacheProperties cache = new CacheProperties();
 
  @Data
@@ -62,7 +65,7 @@ public static class Cluster {
    String defaultKeySerde;
    String defaultValueSerde;
 
-    MetricsConfigData metrics;
+    MetricsConfig metrics;
    Map<String, Object> properties;
    Map<String, Object> consumerProperties;
    Map<String, Object> producerProperties;
@@ -84,8 +87,8 @@ public static class PollingProperties {
  }
 
  @Data
-  @ToString(exclude = "password")
-  public static class MetricsConfigData {
+  @ToString(exclude = {"password", "keystorePassword"})
+  public static class MetricsConfig {
    String type;
    Integer port;
    Boolean ssl;
@@ -93,6 +96,25 @@
    String password;
    String keystoreLocation;
    String keystorePassword;
+
+    Boolean prometheusExpose;
+    MetricsStorage store;
  }
+
+  @Data
+  public static class MetricsStorage {
+    PrometheusStorage prometheus;
+  }
+
+  @Data
+  @ToString(exclude = {"pushGatewayPassword"})
+  public static class PrometheusStorage {
+    String url;
+    String pushGatewayUrl;
+    String pushGatewayUsername;
+    String pushGatewayPassword;
+    String pushGatewayJobName;
+    Boolean remoteWrite;
+  }
 
  @Data
@@ -207,7 +229,7 @@ public void validateAndSetDefaults() {
  private void setMetricsDefaults() {
    for (Cluster cluster : clusters) {
      if (cluster.getMetrics() != null && !StringUtils.hasText(cluster.getMetrics().getType())) {
-        cluster.getMetrics().setType(MetricsConfig.JMX_METRICS_TYPE);
+        cluster.getMetrics().setType(JMX_METRICS_TYPE);
      }
    }
  }
diff --git a/api/src/main/java/io/kafbat/ui/config/auth/AbstractAuthSecurityConfig.java b/api/src/main/java/io/kafbat/ui/config/auth/AbstractAuthSecurityConfig.java
index 550afe121..5199d3b22 100644
--- a/api/src/main/java/io/kafbat/ui/config/auth/AbstractAuthSecurityConfig.java
+++ b/api/src/main/java/io/kafbat/ui/config/auth/AbstractAuthSecurityConfig.java
@@ -26,6 +26,7 @@ protected AbstractAuthSecurityConfig() {
      "/static/**",
      "/resources/**",
+      "/metrics",
 
      /* ACTUATOR */
      "/actuator/health/**",
diff --git a/api/src/main/java/io/kafbat/ui/controller/GraphsController.java b/api/src/main/java/io/kafbat/ui/controller/GraphsController.java
new file mode 100644
index 000000000..02a5421d4
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/controller/GraphsController.java
@@ -0,0 +1,80 @@
+package io.kafbat.ui.controller;
+
+import io.kafbat.ui.api.GraphsApi;
+import io.kafbat.ui.model.GraphDataRequestDTO;
+import io.kafbat.ui.model.GraphDescriptionDTO;
+import io.kafbat.ui.model.GraphDescriptionsDTO;
+import io.kafbat.ui.model.GraphParameterDTO;
+import io.kafbat.ui.model.PrometheusApiQueryResponseDTO;
+import io.kafbat.ui.model.rbac.AccessContext;
+import io.kafbat.ui.prometheus.model.QueryResponse;
+import io.kafbat.ui.service.graphs.GraphDescription;
+import io.kafbat.ui.service.graphs.GraphsService;
+import java.time.Duration;
+import java.time.OffsetDateTime;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.mapstruct.Mapper;
+import org.mapstruct.factory.Mappers;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class GraphsController extends AbstractController implements GraphsApi {
+
+  private static final PrometheusApiMapper MAPPER = Mappers.getMapper(PrometheusApiMapper.class);
+
+  @Mapper
+  interface PrometheusApiMapper {
+    PrometheusApiQueryResponseDTO fromClientResponse(QueryResponse resp);
+  }
+
+  private final GraphsService graphsService;
+
+  @Override
+  public Mono<ResponseEntity<PrometheusApiQueryResponseDTO>> getGraphData(String clusterName,
+                                                                          Mono<GraphDataRequestDTO> graphDataRequestDto,
+                                                                          ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getGraphData")
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(
+            graphDataRequestDto.flatMap(req ->
+                graphsService.getGraphData(
+                    getCluster(clusterName),
+                    req.getId(),
+                    Optional.ofNullable(req.getFrom()).map(OffsetDateTime::toInstant).orElse(null),
+                    Optional.ofNullable(req.getTo()).map(OffsetDateTime::toInstant).orElse(null),
+                    req.getParameters()
+                ).map(MAPPER::fromClientResponse))
+                .map(ResponseEntity::ok)
+        ).doOnEach(sig -> auditService.audit(context, sig));
+  }
+
+  @Override
+  public Mono<ResponseEntity<GraphDescriptionsDTO>> getGraphsList(String clusterName,
+                                                                  ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getGraphsList")
+        .build();
+
+    var graphs = graphsService.getGraphs(getCluster(clusterName));
+    return accessControlService.validateAccess(context).then(
+        Mono.just(ResponseEntity.ok(new GraphDescriptionsDTO().graphs(graphs.map(this::map).toList()))));
+  }
+
+  private GraphDescriptionDTO map(GraphDescription graph) {
+    return new GraphDescriptionDTO()
+        .id(graph.id())
+        .defaultPeriod(Optional.ofNullable(graph.defaultInterval()).map(Duration::toString).orElse(null))
+        .type(graph.isRange() ? GraphDescriptionDTO.TypeEnum.RANGE : GraphDescriptionDTO.TypeEnum.INSTANT)
+        .parameters(graph.params().stream().map(GraphParameterDTO::new).toList());
+  }
+}
diff --git a/api/src/main/java/io/kafbat/ui/controller/MessagesController.java b/api/src/main/java/io/kafbat/ui/controller/MessagesController.java
index b99fa2aba..6f6b3e455 100644
--- a/api/src/main/java/io/kafbat/ui/controller/MessagesController.java
+++ b/api/src/main/java/io/kafbat/ui/controller/MessagesController.java
@@ -75,7 +75,7 @@ public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilte
        .map(ResponseEntity::ok);
  }
 
-  @Deprecated
+  @Deprecated(forRemoval = true, since = "1.1.0")
  @Override
  public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessages(String clusterName,
                                                                           String topicName,
diff --git a/api/src/main/java/io/kafbat/ui/controller/PrometheusExposeController.java b/api/src/main/java/io/kafbat/ui/controller/PrometheusExposeController.java
new file mode 100644
index 000000000..26bceb8e0
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/controller/PrometheusExposeController.java
@@ -0,0 +1,47 @@
+package io.kafbat.ui.controller;
+
+import io.kafbat.ui.api.PrometheusExposeApi;
+import io.kafbat.ui.model.KafkaCluster;
+import io.kafbat.ui.service.StatisticsCache;
+import io.kafbat.ui.service.metrics.prometheus.PrometheusMetricsExposer;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class PrometheusExposeController extends AbstractController implements PrometheusExposeApi {
+
+  private final StatisticsCache statisticsCache;
+
+  @Override
+  public Mono<ResponseEntity<String>> exposeAllMetrics(ServerWebExchange exchange) {
+    return Mono.just(
+        PrometheusMetricsExposer.exposeAllMetrics(
+            clustersStorage.getKafkaClusters()
+                .stream()
+                .filter(KafkaCluster::isExposeMetricsViaPrometheusEndpoint)
+                .collect(Collectors.toMap(KafkaCluster::getName, c -> statisticsCache.get(c).getMetrics()))
+        )
+    );
+  }
+
+  @Override
+  public Mono<ResponseEntity<String>> exposeClusterMetrics(String clusterName,
+                                                           ServerWebExchange exchange) {
+    Optional<KafkaCluster> cluster = clustersStorage.getClusterByName(clusterName);
+    if (cluster.isPresent() && cluster.get().isExposeMetricsViaPrometheusEndpoint()) {
+      return Mono.just(PrometheusMetricsExposer.exposeAllMetrics(
+          Map.of(clusterName, statisticsCache.get(cluster.get()).getMetrics())
+      ));
+    } else {
+      return Mono.just(ResponseEntity.notFound().build());
+    }
+  }
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/controller/SchemasController.java b/api/src/main/java/io/kafbat/ui/controller/SchemasController.java
index 6f73d3525..a34110031 100644
--- a/api/src/main/java/io/kafbat/ui/controller/SchemasController.java
+++ b/api/src/main/java/io/kafbat/ui/controller/SchemasController.java
@@ -1,5 +1,7 @@
 package io.kafbat.ui.controller;
 
+import static org.apache.commons.lang3.Strings.CI;
+
 import io.kafbat.ui.api.SchemasApi;
 import io.kafbat.ui.exception.ValidationException;
 import io.kafbat.ui.mapper.KafkaSrMapper;
@@ -222,7 +224,7 @@ public Mono<ResponseEntity<SchemaSubjectsResponseDTO>> getSchemas(String cluster
          int subjectToSkip = ((pageNum != null && pageNum > 0 ? pageNum : 1) - 1) * pageSize;
          List<String> filteredSubjects = subjects
              .stream()
-              .filter(subj -> search == null || StringUtils.containsIgnoreCase(subj, search))
+              .filter(subj -> search == null || CI.contains(subj, search))
              .sorted().toList();
          var totalPages = (filteredSubjects.size() / pageSize)
              + (filteredSubjects.size() % pageSize == 0 ? 0 : 1);
diff --git a/api/src/main/java/io/kafbat/ui/controller/TopicsController.java b/api/src/main/java/io/kafbat/ui/controller/TopicsController.java
index 208ca59cb..b72a4e395 100644
--- a/api/src/main/java/io/kafbat/ui/controller/TopicsController.java
+++ b/api/src/main/java/io/kafbat/ui/controller/TopicsController.java
@@ -7,6 +7,7 @@ import static io.kafbat.ui.model.rbac.permission.TopicAction.EDIT;
 import static io.kafbat.ui.model.rbac.permission.TopicAction.VIEW;
 import static java.util.stream.Collectors.toList;
+import static org.apache.commons.lang3.Strings.CI;
 
 import io.kafbat.ui.api.TopicsApi;
 import io.kafbat.ui.mapper.ClusterMapper;
@@ -190,7 +191,7 @@ public Mono<ResponseEntity<TopicsResponseDTO>> getTopics(String clusterName,
          List<InternalTopic> filtered = topics.stream()
              .filter(topic -> !topic.isInternal()
                  || showInternal != null && showInternal)
-              .filter(topic -> search == null || StringUtils.containsIgnoreCase(topic.getName(), search))
+              .filter(topic -> search == null || CI.contains(topic.getName(), search))
              .sorted(comparator)
              .toList();
          var totalPages = (filtered.size() / pageSize)
diff --git a/api/src/main/java/io/kafbat/ui/emitter/MessageFilters.java b/api/src/main/java/io/kafbat/ui/emitter/MessageFilters.java
index fbbc84ab3..1500a7696 100644
--- a/api/src/main/java/io/kafbat/ui/emitter/MessageFilters.java
+++ b/api/src/main/java/io/kafbat/ui/emitter/MessageFilters.java
@@ -1,6 +1,7 @@
 package io.kafbat.ui.emitter;
 
 import static java.util.Collections.emptyMap;
+import static org.apache.commons.lang3.Strings.CS;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.core.type.TypeReference;
@@ -35,7 +36,6 @@
 import javax.annotation.Nullable;
 import lombok.experimental.UtilityClass;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
 
 @Slf4j
 @UtilityClass
@@ -55,8 +55,8 @@ public static Predicate<TopicMessageDTO> noop() {
  }
 
  public static Predicate<TopicMessageDTO> containsStringFilter(String string) {
-    return msg -> StringUtils.contains(msg.getKey(), string)
-        || StringUtils.contains(msg.getValue(), string) || headersContains(msg, string);
+    return msg -> CS.contains(msg.getKey(), string)
+        || CS.contains(msg.getValue(), string) || headersContains(msg, string);
  }
 
  private static boolean headersContains(TopicMessageDTO msg, String searchString) {
@@ -67,7 +67,7 @@ private static boolean headersContains(TopicMessageDTO msg, String searchString)
    }
 
    for (final var entry : headers.entrySet()) {
-      if (StringUtils.contains(entry.getKey(), searchString) || StringUtils.contains(entry.getValue(), searchString)) {
+      if (CS.contains(entry.getKey(), searchString) || CS.contains(entry.getValue(), searchString)) {
        return true;
      }
    }
diff --git a/api/src/main/java/io/kafbat/ui/emitter/OffsetsInfo.java b/api/src/main/java/io/kafbat/ui/emitter/OffsetsInfo.java
index 7f34e1708..1d0634c42 100644
--- a/api/src/main/java/io/kafbat/ui/emitter/OffsetsInfo.java
+++ b/api/src/main/java/io/kafbat/ui/emitter/OffsetsInfo.java
@@ -83,7 +83,7 @@ boolean assignedPartitionsFullyPolled() {
 
  long summaryOffsetsRange() {
    MutableLong cnt = new MutableLong();
    nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp)));
-    return cnt.getValue();
+    return cnt.get().longValue();
  }
 
  public Set<TopicPartition> allTargetPartitions() {
diff --git a/api/src/main/java/io/kafbat/ui/emitter/SeekOperations.java b/api/src/main/java/io/kafbat/ui/emitter/SeekOperations.java
index 87f29102c..b9135b495 100644
--- a/api/src/main/java/io/kafbat/ui/emitter/SeekOperations.java
+++ b/api/src/main/java/io/kafbat/ui/emitter/SeekOperations.java
@@ -55,7 +55,7 @@ public long summaryOffsetsRange() {
 
  public long offsetsProcessedFromSeek() {
    MutableLong count = new MutableLong();
    offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset));
-    return count.getValue();
+    return count.get().longValue();
  }
 
  // Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
diff --git a/api/src/main/java/io/kafbat/ui/mapper/ClusterMapper.java b/api/src/main/java/io/kafbat/ui/mapper/ClusterMapper.java
index 53e624528..28793be33 100644
--- a/api/src/main/java/io/kafbat/ui/mapper/ClusterMapper.java
+++ b/api/src/main/java/io/kafbat/ui/mapper/ClusterMapper.java
@@ -1,8 +1,12 @@
 package io.kafbat.ui.mapper;
 
+import static io.kafbat.ui.util.MetricsUtils.readPointValue;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+
+import io.kafbat.ui.config.ClustersProperties;
 import io.kafbat.ui.model.BrokerConfigDTO;
 import io.kafbat.ui.model.BrokerDTO;
-import io.kafbat.ui.model.BrokerDiskUsageDTO;
 import io.kafbat.ui.model.BrokerMetricsDTO;
 import io.kafbat.ui.model.ClusterDTO;
 import io.kafbat.ui.model.ClusterFeature;
@@ -10,9 +14,9 @@
 import io.kafbat.ui.model.ClusterStatsDTO;
 import io.kafbat.ui.model.ConfigSourceDTO;
 import io.kafbat.ui.model.ConfigSynonymDTO;
+import io.kafbat.ui.model.ConnectDTO;
 import io.kafbat.ui.model.InternalBroker;
 import io.kafbat.ui.model.InternalBrokerConfig;
-import io.kafbat.ui.model.InternalBrokerDiskUsage;
 import io.kafbat.ui.model.InternalClusterState;
 import io.kafbat.ui.model.InternalPartition;
 import io.kafbat.ui.model.InternalReplica;
@@ -29,9 +33,13 @@
 import io.kafbat.ui.model.TopicDTO;
 import io.kafbat.ui.model.TopicDetailsDTO;
 import io.kafbat.ui.model.TopicProducerStateDTO;
-import io.kafbat.ui.service.metrics.RawMetric;
+import io.kafbat.ui.service.metrics.SummarizedMetrics;
+import io.prometheus.metrics.model.snapshots.Label;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Stream;
 import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.ProducerState;
 import org.apache.kafka.common.acl.AccessControlEntry;
@@ -55,19 +63,24 @@ public interface ClusterMapper {
 
  default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
    return new ClusterMetricsDTO()
-        .items(metrics.getSummarizedMetrics().map(this::convert).toList());
+        .items(convert(new SummarizedMetrics(metrics).asStream()).toList());
  }
 
-  private MetricDTO convert(RawMetric rawMetric) {
-    return new MetricDTO()
-        .name(rawMetric.name())
-        .labels(rawMetric.labels())
-        .value(rawMetric.value());
+  private Stream<MetricDTO> convert(Stream<MetricSnapshot> metrics) {
+    return metrics
+        .flatMap(m ->
+            m.getDataPoints().stream()
+                .map(p ->
+                    new MetricDTO()
+                        .name(m.getMetadata().getName())
+                        .labels(p.getLabels().stream().collect(toMap(Label::getName, Label::getValue)))
+                        .value(BigDecimal.valueOf(readPointValue(p)))
+                )
+        );
  }
 
-  default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
-    return new BrokerMetricsDTO()
-        .metrics(metrics.stream().map(this::convert).toList());
+  default BrokerMetricsDTO toBrokerMetrics(List<MetricSnapshot> metrics) {
+    return new BrokerMetricsDTO().metrics(convert(metrics.stream()).toList());
  }
 
  @Mapping(target = "isSensitive", source = "sensitive")
@@ -105,19 +118,14 @@ default ConfigSynonymDTO toConfigSynonym(ConfigEntry.ConfigSynonym config) {
 
  ReplicaDTO toReplica(InternalReplica replica);
 
+  ConnectDTO toKafkaConnect(ClustersProperties.ConnectCluster connect);
+
  List<ClusterFeatureDTO> toFeaturesEnum(List<ClusterFeature> features);
 
  default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
-    return map.values().stream().map(this::toPartition).toList();
+    return map.values().stream().map(this::toPartition).collect(toList());
  }
 
-  default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
-    final BrokerDiskUsageDTO brokerDiskUsage = new BrokerDiskUsageDTO();
-    brokerDiskUsage.setBrokerId(id);
-    brokerDiskUsage.segmentCount((int) internalBrokerDiskUsage.getSegmentCount());
-    brokerDiskUsage.segmentSize(internalBrokerDiskUsage.getSegmentSize());
-    return brokerDiskUsage;
-  }
 
  default TopicProducerStateDTO map(int partition, ProducerState state) {
    return new TopicProducerStateDTO()
diff --git a/api/src/main/java/io/kafbat/ui/mapper/DescribeLogDirsMapper.java b/api/src/main/java/io/kafbat/ui/mapper/DescribeLogDirsMapper.java
index 3831169b1..dac046703 100644
--- a/api/src/main/java/io/kafbat/ui/mapper/DescribeLogDirsMapper.java
+++ b/api/src/main/java/io/kafbat/ui/mapper/DescribeLogDirsMapper.java
@@ -42,8 +42,10 @@ private BrokersLogdirsDTO toBrokerLogDirs(Integer broker, String dirName,
    return result;
  }
 
-  private BrokerTopicLogdirsDTO toTopicLogDirs(Integer broker, String name,
-                                               List<Map.Entry<TopicPartition, ReplicaInfo>> partitions) {
+  private BrokerTopicLogdirsDTO toTopicLogDirs(
+      Integer broker, String name,
+      List<Map.Entry<TopicPartition, ReplicaInfo>> partitions) {
+
    BrokerTopicLogdirsDTO topic = new BrokerTopicLogdirsDTO();
    topic.setName(name);
    topic.setPartitions(
diff --git a/api/src/main/java/io/kafbat/ui/mapper/DynamicConfigMapper.java b/api/src/main/java/io/kafbat/ui/mapper/DynamicConfigMapper.java
index 59861d49e..ebe465660 100644
--- a/api/src/main/java/io/kafbat/ui/mapper/DynamicConfigMapper.java
+++ b/api/src/main/java/io/kafbat/ui/mapper/DynamicConfigMapper.java
@@ -1,15 +1,18 @@
 package io.kafbat.ui.mapper;
 
+import io.kafbat.ui.config.ClustersProperties;
 import io.kafbat.ui.model.ActionDTO;
 import io.kafbat.ui.model.ApplicationConfigPropertiesAuthOauth2ResourceServerDTO;
 import io.kafbat.ui.model.ApplicationConfigPropertiesAuthOauth2ResourceServerJwtDTO;
 import io.kafbat.ui.model.ApplicationConfigPropertiesAuthOauth2ResourceServerOpaquetokenDTO;
 import io.kafbat.ui.model.ApplicationConfigPropertiesDTO;
+import io.kafbat.ui.model.ApplicationConfigPropertiesKafkaClustersInnerDTO;
 import io.kafbat.ui.model.ApplicationConfigPropertiesRbacRolesInnerPermissionsInnerDTO;
 import io.kafbat.ui.model.rbac.Permission;
 import io.kafbat.ui.util.DynamicConfigOperations;
 import java.util.Optional;
 import org.mapstruct.Mapper;
+import org.mapstruct.Mapping;
 import org.springframework.boot.autoconfigure.security.oauth2.resource.OAuth2ResourceServerProperties;
 import org.springframework.core.io.FileSystemResource;
 import org.springframework.core.io.Resource;
@@ -19,12 +22,16 @@ public interface DynamicConfigMapper {
 
  DynamicConfigOperations.PropertiesStructure fromDto(ApplicationConfigPropertiesDTO dto);
 
+  @Mapping(target = "kafka.clusters[].metrics.store", ignore = true)
 ApplicationConfigPropertiesDTO toDto(DynamicConfigOperations.PropertiesStructure propertiesStructure);
 
  default String map(Resource resource) {
    return resource.getFilename();
  }
 
+  @Mapping(source = "metrics.store", target = "metrics.store", ignore = true)
+  ApplicationConfigPropertiesKafkaClustersInnerDTO map(ClustersProperties.Cluster cluster);
+
  default Permission map(ApplicationConfigPropertiesRbacRolesInnerPermissionsInnerDTO perm) {
    Permission permission = new Permission();
    permission.setResource(perm.getResource().getValue());
diff --git a/api/src/main/java/io/kafbat/ui/model/ClusterFeature.java b/api/src/main/java/io/kafbat/ui/model/ClusterFeature.java
index 6a88534e0..bdd66ea04 100644
--- a/api/src/main/java/io/kafbat/ui/model/ClusterFeature.java
+++ b/api/src/main/java/io/kafbat/ui/model/ClusterFeature.java
@@ -7,5 +7,6 @@ public enum ClusterFeature {
  TOPIC_DELETION,
  KAFKA_ACL_VIEW,
  KAFKA_ACL_EDIT,
-  CLIENT_QUOTA_MANAGEMENT
+  CLIENT_QUOTA_MANAGEMENT,
+  GRAPHS_ENABLED
 }
diff --git a/api/src/main/java/io/kafbat/ui/model/InternalBroker.java b/api/src/main/java/io/kafbat/ui/model/InternalBroker.java
index fd203c70f..e90cd5f7c 100644
--- a/api/src/main/java/io/kafbat/ui/model/InternalBroker.java
+++ b/api/src/main/java/io/kafbat/ui/model/InternalBroker.java
@@ -21,12 +21,12 @@ public class InternalBroker {
 
  public InternalBroker(Node node,
                        PartitionDistributionStats partitionDistribution,
-                        Statistics statistics) {
+                        Metrics metrics) {
    this.id = node.id();
    this.host = node.host();
    this.port = node.port();
-    this.bytesInPerSec = statistics.getMetrics().getBrokerBytesInPerSec().get(node.id());
-    this.bytesOutPerSec = statistics.getMetrics().getBrokerBytesOutPerSec().get(node.id());
+    this.bytesInPerSec = metrics.getIoRates().brokerBytesInPerSec().get(node.id());
+    this.bytesOutPerSec = metrics.getIoRates().brokerBytesOutPerSec().get(node.id());
    this.partitionsLeader = partitionDistribution.getPartitionLeaders().get(node);
    this.partitions = partitionDistribution.getPartitionsCount().get(node);
    this.inSyncPartitions = partitionDistribution.getInSyncPartitions().get(node);
diff --git a/api/src/main/java/io/kafbat/ui/model/InternalBrokerDiskUsage.java b/api/src/main/java/io/kafbat/ui/model/InternalBrokerDiskUsage.java
deleted file mode 100644
index db48fab08..000000000
--- a/api/src/main/java/io/kafbat/ui/model/InternalBrokerDiskUsage.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package io.kafbat.ui.model;
-
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-public class InternalBrokerDiskUsage {
-  private final long segmentCount;
-  private final long segmentSize;
-}
diff --git a/api/src/main/java/io/kafbat/ui/model/InternalClusterState.java b/api/src/main/java/io/kafbat/ui/model/InternalClusterState.java
index 5f3c1f308..1c50fdad7 100644
--- a/api/src/main/java/io/kafbat/ui/model/InternalClusterState.java
+++ b/api/src/main/java/io/kafbat/ui/model/InternalClusterState.java
@@ -36,39 +36,42 @@ public InternalClusterState(KafkaCluster cluster, Statistics statistics) {
        .message(e.getMessage())
        .stackTrace(Throwables.getStackTraceAsString(e)))
        .orElse(null);
-    topicCount = statistics.getTopicDescriptions().size();
+    topicCount = (int) statistics.topicDescriptions().count();
    brokerCount = statistics.getClusterDescription().getNodes().size();
    activeControllers = Optional.ofNullable(statistics.getClusterDescription().getController())
        .map(Node::id)
        .orElse(null);
    version = statistics.getVersion();
 
-    if (statistics.getLogDirInfo() != null) {
-      diskUsage = statistics.getLogDirInfo().getBrokerStats().entrySet().stream()
-          .map(e -> new BrokerDiskUsageDTO()
-              .brokerId(e.getKey())
-              .segmentSize(e.getValue().getSegmentSize())
-              .segmentCount(e.getValue().getSegmentsCount()))
-          .collect(Collectors.toList());
-    }
+    diskUsage = statistics.getClusterState().getNodesStates().values().stream()
+        .filter(n -> n.segmentStats() != null)
+        .map(n -> new BrokerDiskUsageDTO()
+            .brokerId(n.id())
+            .segmentSize(n.segmentStats().getSegmentSize())
+            .segmentCount(n.segmentStats().getSegmentsCount()))
+        .collect(Collectors.toList());
 
    features = statistics.getFeatures();
 
    bytesInPerSec = statistics
        .getMetrics()
-        .getBrokerBytesInPerSec()
-        .values().stream()
+        .getIoRates()
+        .brokerBytesInPerSec()
+        .values()
+        .stream()
        .reduce(BigDecimal::add)
        .orElse(null);
 
    bytesOutPerSec = statistics
        .getMetrics()
-        .getBrokerBytesOutPerSec()
-        .values().stream()
+        .getIoRates()
+        .brokerBytesOutPerSec()
+        .values()
+        .stream()
        .reduce(BigDecimal::add)
        .orElse(null);
 
-    var partitionsStats = new PartitionsStats(statistics.getTopicDescriptions().values());
+    var partitionsStats = new PartitionsStats(statistics.topicDescriptions().toList());
    onlinePartitionCount = partitionsStats.getOnlinePartitionCount();
    offlinePartitionCount = partitionsStats.getOfflinePartitionCount();
    inSyncReplicasCount = partitionsStats.getInSyncReplicasCount();
diff --git a/api/src/main/java/io/kafbat/ui/model/InternalLogDirStats.java b/api/src/main/java/io/kafbat/ui/model/InternalLogDirStats.java
index 09cc56dcf..caa47bc2b 100644
--- a/api/src/main/java/io/kafbat/ui/model/InternalLogDirStats.java
+++ b/api/src/main/java/io/kafbat/ui/model/InternalLogDirStats.java
@@ -4,13 +4,15 @@
 import static java.util.stream.Collectors.groupingBy;
 import static java.util.stream.Collectors.summarizingLong;
 
+import jakarta.annotation.Nullable;
+import java.util.HashMap;
 import java.util.List;
 import java.util.LongSummaryStatistics;
 import java.util.Map;
+import lombok.RequiredArgsConstructor;
 import lombok.Value;
 import org.apache.kafka.clients.admin.LogDirDescription;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import reactor.util.function.Tuple2;
 import reactor.util.function.Tuple3;
 import reactor.util.function.Tuples;
@@ -19,27 +21,34 @@
 public class InternalLogDirStats {
 
  @Value
+  @RequiredArgsConstructor
  public static class SegmentStats {
-    long segmentSize;
-    int segmentsCount;
+    Long segmentSize;
+    Integer segmentsCount;
 
-    public SegmentStats(LongSummaryStatistics s) {
-      segmentSize = s.getSum();
-      segmentsCount = (int) s.getCount();
+    private SegmentStats(LongSummaryStatistics s) {
+      this(s.getSum(), (int) s.getCount());
    }
  }
 
+  public record LogDirSpaceStats(@Nullable Long totalBytes,
+                                 @Nullable Long usableBytes,
+                                 Map<String, Long> totalPerDir,
+                                 Map<String, Long> usablePerDir) {
+  }
+
  Map<TopicPartition, SegmentStats> partitionsStats;
  Map<String, SegmentStats> topicStats;
  Map<Integer, SegmentStats> brokerStats;
+  Map<Integer, LogDirSpaceStats> brokerDirsStats;
 
  public static InternalLogDirStats empty() {
    return new InternalLogDirStats(Map.of());
  }
 
-  public InternalLogDirStats(Map<Integer, Map<String, LogDirDescription>> log) {
+  public InternalLogDirStats(Map<Integer, Map<String, LogDirDescription>> logsInfo) {
    final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
-        log.entrySet().stream().flatMap(b ->
+        logsInfo.entrySet().stream().flatMap(b ->
            b.getValue().entrySet().stream().flatMap(topicMap ->
                topicMap.getValue().replicaInfos().entrySet().stream()
                    .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size()))
@@ -64,5 +73,34 @@ public InternalLogDirStats(Map<Integer, Map<String, LogDirDescription>> log) {
            Tuple2::getT1,
            collectingAndThen(
                summarizingLong(Tuple3::getT3), SegmentStats::new)));
+
+    brokerDirsStats = calculateSpaceStats(logsInfo);
+  }
+
+  private static Map<Integer, LogDirSpaceStats> calculateSpaceStats(
+      Map<Integer, Map<String, LogDirDescription>> logsInfo) {
+
+    var stats = new HashMap<Integer, LogDirSpaceStats>();
+    logsInfo.forEach((brokerId, logDirStats) -> {
+      Map<String, Long> totalBytes = new HashMap<>();
+      Map<String, Long> usableBytes = new HashMap<>();
+      logDirStats.forEach((logDir, descr) -> {
+        if (descr.error() != null) {
+          return;
+        }
+        descr.totalBytes().ifPresent(b -> totalBytes.merge(logDir, b, Long::sum));
+        descr.usableBytes().ifPresent(b -> usableBytes.merge(logDir, b, Long::sum));
+      });
+      stats.put(
+          brokerId,
+          new LogDirSpaceStats(
+              totalBytes.isEmpty() ? null : totalBytes.values().stream().mapToLong(i -> i).sum(),
+              usableBytes.isEmpty() ? null : usableBytes.values().stream().mapToLong(i -> i).sum(),
+              totalBytes,
+              usableBytes
+          )
+      );
+    });
+    return stats;
+  }
 }
diff --git a/api/src/main/java/io/kafbat/ui/model/InternalPartitionsOffsets.java b/api/src/main/java/io/kafbat/ui/model/InternalPartitionsOffsets.java
index b4febb56d..57a9c3b82 100644
--- a/api/src/main/java/io/kafbat/ui/model/InternalPartitionsOffsets.java
+++ b/api/src/main/java/io/kafbat/ui/model/InternalPartitionsOffsets.java
@@ -4,6 +4,7 @@
 import com.google.common.collect.Table;
 import java.util.Map;
 import java.util.Optional;
+import java.util.stream.Collectors;
 import lombok.Value;
 import org.apache.kafka.common.TopicPartition;
@@ -29,4 +30,11 @@ public Optional<Offsets> get(String topic, int partition) {
    return Optional.ofNullable(offsets.get(topic, partition));
  }
 
+  public Map<Integer, Long> topicOffsets(String topic, boolean earliest) {
+    return offsets.row(topic)
+        .entrySet()
+        .stream()
+        .collect(Collectors.toMap(Map.Entry::getKey, e -> earliest ? e.getValue().earliest : e.getValue().getLatest()));
+  }
+
 }
diff --git a/api/src/main/java/io/kafbat/ui/model/InternalTopic.java b/api/src/main/java/io/kafbat/ui/model/InternalTopic.java
index 6aa3a0a1a..1e8c31b5b 100644
--- a/api/src/main/java/io/kafbat/ui/model/InternalTopic.java
+++ b/api/src/main/java/io/kafbat/ui/model/InternalTopic.java
@@ -3,6 +3,7 @@
 import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.Builder;
@@ -41,7 +42,8 @@ public static InternalTopic from(TopicDescription topicDescription,
                                   List<ConfigEntry> configs,
                                   InternalPartitionsOffsets partitionsOffsets,
                                   Metrics metrics,
-                                   InternalLogDirStats logDirInfo,
+                                   @Nullable InternalLogDirStats.SegmentStats segmentStats,
+                                   @Nullable Map<Integer, InternalLogDirStats.SegmentStats> partitionsSegmentStats,
                                   @Nullable String internalTopicPrefix) {
    var topic = InternalTopic.builder();
 
@@ -78,13 +80,13 @@
            partitionDto.offsetMax(offsets.getLatest());
          });
 
-          var segmentStats =
-              logDirInfo.getPartitionsStats().get(
-                  new TopicPartition(topicDescription.name(), partition.partition()));
-          if (segmentStats != null) {
-            partitionDto.segmentCount(segmentStats.getSegmentsCount());
-            partitionDto.segmentSize(segmentStats.getSegmentSize());
-          }
+          Optional.ofNullable(partitionsSegmentStats)
+              .flatMap(s -> Optional.ofNullable(s.get(partition.partition())))
+              .ifPresent(stats -> {
+                partitionDto.segmentCount(stats.getSegmentsCount());
+                partitionDto.segmentSize(stats.getSegmentSize());
+              });
+
 
          return partitionDto.build();
        })
@@ -105,14 +107,14 @@
          : topicDescription.partitions().get(0).replicas().size()
    );
 
-    var segmentStats = logDirInfo.getTopicStats().get(topicDescription.name());
-    if (segmentStats != null) {
-      topic.segmentCount(segmentStats.getSegmentsCount());
-      topic.segmentSize(segmentStats.getSegmentSize());
-    }
+    Optional.ofNullable(segmentStats)
+        .ifPresent(stats -> {
+          topic.segmentCount(stats.getSegmentsCount());
+          topic.segmentSize(stats.getSegmentSize());
+        });
 
-    topic.bytesInPerSec(metrics.getTopicBytesInPerSec().get(topicDescription.name()));
-    topic.bytesOutPerSec(metrics.getTopicBytesOutPerSec().get(topicDescription.name()));
+    topic.bytesInPerSec(metrics.getIoRates().topicBytesInPerSec().get(topicDescription.name()));
+    topic.bytesOutPerSec(metrics.getIoRates().topicBytesOutPerSec().get(topicDescription.name()));
 
    topic.topicConfigs(
        configs.stream().map(InternalTopicConfig::from).collect(Collectors.toList()));
diff --git a/api/src/main/java/io/kafbat/ui/model/KafkaCluster.java b/api/src/main/java/io/kafbat/ui/model/KafkaCluster.java
index 6e2a00988..8fad56a49 100644
--- a/api/src/main/java/io/kafbat/ui/model/KafkaCluster.java
+++ b/api/src/main/java/io/kafbat/ui/model/KafkaCluster.java
@@ -3,8 +3,10 @@
 import io.kafbat.ui.config.ClustersProperties;
 import io.kafbat.ui.connect.api.KafkaConnectClientApi;
 import io.kafbat.ui.emitter.PollingSettings;
+import io.kafbat.ui.prometheus.api.PrometheusClientApi;
 import io.kafbat.ui.service.ksql.KsqlApiClient;
 import io.kafbat.ui.service.masking.DataMasking;
+import io.kafbat.ui.service.metrics.scrape.MetricsScraper;
 import io.kafbat.ui.sr.api.KafkaSrClientApi;
 import io.kafbat.ui.util.ReactiveFailover;
 import java.util.Map;
@@ -27,10 +29,12 @@ public class KafkaCluster {
  private final Properties consumerProperties;
  private final Properties producerProperties;
  private final boolean readOnly;
-  private final MetricsConfig metricsConfig;
+  private final boolean exposeMetricsViaPrometheusEndpoint;
  private final DataMasking masking;
  private final PollingSettings pollingSettings;
+  private final MetricsScraper metricsScrapping;
  private final ReactiveFailover<KafkaSrClientApi> schemaRegistryClient;
  private final Map<String, ReactiveFailover<KafkaConnectClientApi>> connectsClients;
  private final ReactiveFailover<KsqlApiClient> ksqlClient;
+  private final ReactiveFailover<PrometheusClientApi> prometheusStorageClient;
 }
diff --git a/api/src/main/java/io/kafbat/ui/model/Metrics.java b/api/src/main/java/io/kafbat/ui/model/Metrics.java
index bb6d2ff0c..6c55f3172 100644
--- a/api/src/main/java/io/kafbat/ui/model/Metrics.java
+++ b/api/src/main/java/io/kafbat/ui/model/Metrics.java
@@ -1,13 +1,10 @@
 package io.kafbat.ui.model;
 
-import static java.util.stream.Collectors.toMap;
-
-import io.kafbat.ui.service.metrics.RawMetric;
+import io.kafbat.ui.service.metrics.scrape.inferred.InferredMetrics;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
 import java.math.BigDecimal;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Stream;
 import lombok.Builder;
 import lombok.Value;
 
@@ -15,28 +12,32 @@
 @Value
 public class Metrics {
 
-  Map<Integer, BigDecimal> brokerBytesInPerSec;
-  Map<Integer, BigDecimal> brokerBytesOutPerSec;
-  Map<String, BigDecimal> topicBytesInPerSec;
-  Map<String, BigDecimal> topicBytesOutPerSec;
-  Map<Integer, List<RawMetric>> perBrokerMetrics;
+  IoRates ioRates;
+  InferredMetrics inferredMetrics;
+  Map<Integer, List<MetricSnapshot>> perBrokerScrapedMetrics;
 
  public static Metrics empty() {
    return Metrics.builder()
-        .brokerBytesInPerSec(Map.of())
-        .brokerBytesOutPerSec(Map.of())
-        .topicBytesInPerSec(Map.of())
-        .topicBytesOutPerSec(Map.of())
-        .perBrokerMetrics(Map.of())
+        .ioRates(IoRates.empty())
+        .perBrokerScrapedMetrics(Map.of())
+        .inferredMetrics(InferredMetrics.empty())
        .build();
  }
 
-  public Stream<RawMetric> getSummarizedMetrics() {
-    return perBrokerMetrics.values().stream()
-        .flatMap(Collection::stream)
-        .collect(toMap(RawMetric::identityKey, m -> m, (m1, m2) -> m1.copyWithValue(m1.value().add(m2.value()))))
-        .values()
-        .stream();
+  @Builder
+  public record IoRates(Map<Integer, BigDecimal> brokerBytesInPerSec,
+                        Map<Integer, BigDecimal> brokerBytesOutPerSec,
+                        Map<String, BigDecimal> topicBytesInPerSec,
+                        Map<String, BigDecimal> topicBytesOutPerSec) {
+
+    static IoRates empty() {
+      return IoRates.builder()
+          .brokerBytesOutPerSec(Map.of())
+          .brokerBytesInPerSec(Map.of())
+          .topicBytesOutPerSec(Map.of())
+          .topicBytesInPerSec(Map.of())
+          .build();
+    }
  }
 }
diff --git a/api/src/main/java/io/kafbat/ui/model/MetricsConfig.java b/api/src/main/java/io/kafbat/ui/model/MetricsConfig.java
deleted file mode 100644
index 46d036f92..000000000
--- a/api/src/main/java/io/kafbat/ui/model/MetricsConfig.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package io.kafbat.ui.model;
-
-import lombok.AccessLevel;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-@AllArgsConstructor(access = AccessLevel.PRIVATE)
-public class MetricsConfig {
-  public static final String JMX_METRICS_TYPE = "JMX";
-  public static final String PROMETHEUS_METRICS_TYPE = "PROMETHEUS";
-
-  private final String type;
-  private final Integer port;
-  private final boolean ssl;
-  private final String username;
-  private final String password;
-  private final String keystoreLocation;
-  private final String keystorePassword;
-}
diff --git a/api/src/main/java/io/kafbat/ui/model/MetricsScrapeProperties.java b/api/src/main/java/io/kafbat/ui/model/MetricsScrapeProperties.java
new file mode 100644
index 000000000..171e14e09
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/model/MetricsScrapeProperties.java
@@ -0,0 +1,47 @@
+package io.kafbat.ui.model;
+
+import static io.kafbat.ui.config.ClustersProperties.KeystoreConfig;
+import static io.kafbat.ui.config.ClustersProperties.TruststoreConfig;
+
+import io.kafbat.ui.config.ClustersProperties;
+import jakarta.annotation.Nullable;
+import java.util.Objects;
+import java.util.Optional;
+import lombok.AccessLevel;
+import lombok.Builder;
+import lombok.Value;
+
+@Value
+@Builder
+public class MetricsScrapeProperties {
+  public static final String JMX_METRICS_TYPE = "JMX";
+  public static final String PROMETHEUS_METRICS_TYPE = "PROMETHEUS";
+
+  Integer port;
+  boolean ssl;
+  String username;
+  String password;
+
+  @Nullable
+  KeystoreConfig keystoreConfig;
+
+  @Nullable
+  TruststoreConfig truststoreConfig;
+
+  public static MetricsScrapeProperties create(ClustersProperties.Cluster cluster) {
+    var metrics = Objects.requireNonNull(cluster.getMetrics());
+    return MetricsScrapeProperties.builder()
+        .port(metrics.getPort())
+        .ssl(Optional.ofNullable(metrics.getSsl()).orElse(false))
+        .username(metrics.getUsername())
+        .password(metrics.getPassword())
+        .truststoreConfig(cluster.getSsl())
+        .keystoreConfig(
+            metrics.getKeystoreLocation() != null
+                ? new KeystoreConfig(metrics.getKeystoreLocation(), metrics.getKeystorePassword())
+                : null
+        )
+        .build();
+  }
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/model/PartitionDistributionStats.java b/api/src/main/java/io/kafbat/ui/model/PartitionDistributionStats.java
index 0f44a35e3..9eb77ea80 100644
--- a/api/src/main/java/io/kafbat/ui/model/PartitionDistributionStats.java
+++ b/api/src/main/java/io/kafbat/ui/model/PartitionDistributionStats.java
@@ -3,6 +3,7 @@
 import java.math.BigDecimal;
 import java.math.RoundingMode;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import javax.annotation.Nullable;
 import lombok.AccessLevel;
@@ -29,15 +30,19 @@ public class PartitionDistributionStats {
  private final boolean skewCanBeCalculated;
 
  public static PartitionDistributionStats create(Statistics stats) {
-    return create(stats, MIN_PARTITIONS_FOR_SKEW_CALCULATION);
+    return create(
+        stats.topicDescriptions().toList(),
+        MIN_PARTITIONS_FOR_SKEW_CALCULATION
+    );
  }
 
-  static PartitionDistributionStats create(Statistics stats, int minPartitionsForSkewCalculation) {
+  static PartitionDistributionStats create(List<TopicDescription> topicDescriptions,
+                                           int minPartitionsForSkewCalculation) {
    var partitionLeaders = new HashMap<Node, Integer>();
    var partitionsReplicated = new HashMap<Node, Integer>();
    var isr = new HashMap<Node, Integer>();
    int partitionsCnt = 0;
-    for (TopicDescription td : stats.getTopicDescriptions().values()) {
+    for (TopicDescription td : topicDescriptions) {
      for (TopicPartitionInfo tp : td.partitions()) {
        partitionsCnt++;
        tp.replicas().forEach(r -> incr(partitionsReplicated, r));
diff --git a/api/src/main/java/io/kafbat/ui/model/Statistics.java b/api/src/main/java/io/kafbat/ui/model/Statistics.java
index 43e6a26e0..6caba634a 100644
--- a/api/src/main/java/io/kafbat/ui/model/Statistics.java
+++ b/api/src/main/java/io/kafbat/ui/model/Statistics.java
@@ -1,12 +1,12 @@
 package io.kafbat.ui.model;
 
 import io.kafbat.ui.service.ReactiveAdminClient;
+import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.function.UnaryOperator;
+import java.util.stream.Stream;
 import lombok.Builder;
 import lombok.Value;
-import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.TopicDescription;
 
 @Value
@@ -18,21 +18,32 @@ public class Statistics {
  List<ClusterFeature> features;
  ReactiveAdminClient.ClusterDescription clusterDescription;
  Metrics metrics;
-  InternalLogDirStats logDirInfo;
-  Map<String, TopicDescription> topicDescriptions;
-  Map<String, List<ConfigEntry>> topicConfigs;
+  ScrapedClusterState clusterState;
 
  public static Statistics empty() {
    return builder()
        .status(ServerStatusDTO.OFFLINE)
        .version("Unknown")
        .features(List.of())
-        .clusterDescription(
-            new ReactiveAdminClient.ClusterDescription(null, null, List.of(), Set.of()))
+        .clusterDescription(ReactiveAdminClient.ClusterDescription.empty())
        .metrics(Metrics.empty())
-        .logDirInfo(InternalLogDirStats.empty())
-        .topicDescriptions(Map.of())
-        .topicConfigs(Map.of())
+        .clusterState(ScrapedClusterState.empty())
        .build();
  }
+
+  public static Statistics statsUpdateError(Throwable th) {
+    return empty().toBuilder().status(ServerStatusDTO.OFFLINE).lastKafkaException(th).build();
+  }
+
+  public static Statistics initializing() {
+    return empty().toBuilder().status(ServerStatusDTO.INITIALIZING).build();
+  }
+
+  public Stream<TopicDescription> topicDescriptions() {
+    return clusterState.getTopicStates().values().stream().map(ScrapedClusterState.TopicState::description);
+  }
+
+  public Statistics withClusterState(UnaryOperator<ScrapedClusterState> stateUpdate) {
+    return toBuilder().clusterState(stateUpdate.apply(clusterState)).build();
+  }
 }
diff --git a/api/src/main/java/io/kafbat/ui/service/BrokerService.java b/api/src/main/java/io/kafbat/ui/service/BrokerService.java
index cc1b1a1a9..c1ae5fb1d 100644
--- a/api/src/main/java/io/kafbat/ui/service/BrokerService.java
+++ b/api/src/main/java/io/kafbat/ui/service/BrokerService.java
@@ -11,7 +11,7 @@
 import io.kafbat.ui.model.InternalBrokerConfig;
 import io.kafbat.ui.model.KafkaCluster;
 import io.kafbat.ui.model.PartitionDistributionStats;
-import io.kafbat.ui.service.metrics.RawMetric;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -27,7 +27,6 @@
 import org.apache.kafka.common.errors.LogDirNotFoundException;
 import org.apache.kafka.common.errors.TimeoutException;
 import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
-import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -72,7 +71,7 @@ public Flux<InternalBroker> getBrokers(KafkaCluster cluster) {
        .get(cluster)
        .flatMap(ReactiveAdminClient::describeCluster)
        .map(description -> description.getNodes().stream()
-            .map(node -> new InternalBroker(node, partitionsDistribution, stats))
+            .map(node -> new InternalBroker(node, partitionsDistribution, stats.getMetrics()))
            .collect(Collectors.toList()))
        .flatMapMany(Flux::fromIterable);
  }
@@ -139,8 +138,8 @@ public Flux<InternalBrokerConfig> getBrokerConfig(KafkaCluster cluster, Integer
    return getBrokersConfig(cluster, brokerId);
  }
 
-  public Mono<List<RawMetric>> getBrokerMetrics(KafkaCluster cluster, Integer brokerId) {
-    return Mono.justOrEmpty(statisticsCache.get(cluster).getMetrics().getPerBrokerMetrics().get(brokerId));
+  public Mono<List<MetricSnapshot>> getBrokerMetrics(KafkaCluster cluster, Integer brokerId) {
+    return Mono.justOrEmpty(statisticsCache.get(cluster).getMetrics().getPerBrokerScrapedMetrics().get(brokerId));
  }
 
 }
diff --git a/api/src/main/java/io/kafbat/ui/service/ConsumerGroupService.java b/api/src/main/java/io/kafbat/ui/service/ConsumerGroupService.java
index 00ea5179a..5fda6d4ce 100644
--- a/api/src/main/java/io/kafbat/ui/service/ConsumerGroupService.java
+++ b/api/src/main/java/io/kafbat/ui/service/ConsumerGroupService.java
@@ -1,5 +1,7 @@
 package io.kafbat.ui.service;
 
+import static org.apache.commons.lang3.Strings.CI;
+
 import com.google.common.collect.Streams;
 import com.google.common.collect.Table;
 import io.kafbat.ui.emitter.EnhancedConsumer;
@@ -115,7 +117,7 @@ public Mono<ConsumerGroupsPage> getConsumerGroupsPage(
        .map(listing -> search == null
            ? listing
            : listing.stream()
-                .filter(g -> StringUtils.containsIgnoreCase(g.groupId(), search))
+                .filter(g -> CI.contains(g.groupId(), search))
                .toList()
        )
        .flatMapIterable(lst -> lst)
diff --git a/api/src/main/java/io/kafbat/ui/service/FeatureService.java b/api/src/main/java/io/kafbat/ui/service/FeatureService.java
index 59a23236b..8fb19d065 100644
--- a/api/src/main/java/io/kafbat/ui/service/FeatureService.java
+++ b/api/src/main/java/io/kafbat/ui/service/FeatureService.java
@@ -36,6 +36,10 @@ public Mono<List<ClusterFeature>> getAvailableFeatures(ReactiveAdminClient admin
      features.add(Mono.just(ClusterFeature.KSQL_DB));
    }
 
+    if (cluster.getPrometheusStorageClient() != null) {
+      features.add(Mono.just(ClusterFeature.GRAPHS_ENABLED));
+    }
+
    if (cluster.getSchemaRegistryClient() != null) {
      features.add(Mono.just(ClusterFeature.SCHEMA_REGISTRY));
    }
diff --git a/api/src/main/java/io/kafbat/ui/service/KafkaClusterFactory.java b/api/src/main/java/io/kafbat/ui/service/KafkaClusterFactory.java
index f8c528f90..5a0c5201a 100644
--- a/api/src/main/java/io/kafbat/ui/service/KafkaClusterFactory.java
+++ b/api/src/main/java/io/kafbat/ui/service/KafkaClusterFactory.java
@@ -1,5 +1,11 @@
 package io.kafbat.ui.service;
 
+import static io.kafbat.ui.util.KafkaServicesValidation.validateClusterConnection;
+import static io.kafbat.ui.util.KafkaServicesValidation.validateKsql;
+import static io.kafbat.ui.util.KafkaServicesValidation.validatePrometheusStore;
+import static io.kafbat.ui.util.KafkaServicesValidation.validateSchemaRegistry;
+import static io.kafbat.ui.util.KafkaServicesValidation.validateTruststore;
+
 import io.kafbat.ui.client.RetryingKafkaConnectClient;
 import io.kafbat.ui.config.ClustersProperties;
 import io.kafbat.ui.config.WebclientProperties;
@@ -8,9 +14,11 @@
 import io.kafbat.ui.model.ApplicationPropertyValidationDTO;
 import io.kafbat.ui.model.ClusterConfigValidationDTO;
 import io.kafbat.ui.model.KafkaCluster;
-import io.kafbat.ui.model.MetricsConfig;
+import io.kafbat.ui.prometheus.api.PrometheusClientApi;
 import io.kafbat.ui.service.ksql.KsqlApiClient;
 import io.kafbat.ui.service.masking.DataMasking;
+import io.kafbat.ui.service.metrics.scrape.MetricsScraper;
+import io.kafbat.ui.service.metrics.scrape.jmx.JmxMetricsRetriever;
 import io.kafbat.ui.sr.ApiClient;
 import io.kafbat.ui.sr.api.KafkaSrClientApi;
 import io.kafbat.ui.util.KafkaServicesValidation;
@@ -23,9 +31,9 @@
 import java.util.Optional;
 import java.util.Properties;
 import java.util.stream.Stream;
-import javax.annotation.Nullable;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.stereotype.Service;
+import org.springframework.util.StringUtils;
 import org.springframework.util.unit.DataSize;
 import org.springframework.web.reactive.function.client.WebClient;
 import reactor.core.publisher.Flux;
@@ -42,14 +50,17 @@ public class KafkaClusterFactory {
 
  private final DataSize webClientMaxBuffSize;
  private final Duration responseTimeout;
+  private final JmxMetricsRetriever jmxMetricsRetriever;
 
-  public KafkaClusterFactory(WebclientProperties webclientProperties) {
+  public KafkaClusterFactory(WebclientProperties webclientProperties,
+                             JmxMetricsRetriever jmxMetricsRetriever) {
    this.webClientMaxBuffSize = Optional.ofNullable(webclientProperties.getMaxInMemoryBufferSize())
        .map(DataSize::parse)
        .orElse(DEFAULT_WEBCLIENT_BUFFER);
    this.responseTimeout = Optional.ofNullable(webclientProperties.getResponseTimeoutMs())
        .map(Duration::ofMillis)
        .orElse(DEFAULT_RESPONSE_TIMEOUT);
+    this.jmxMetricsRetriever = jmxMetricsRetriever;
  }
 
  public KafkaCluster create(ClustersProperties properties,
@@ -62,8 +73,10 @@ public KafkaCluster create(ClustersProperties properties,
    builder.consumerProperties(convertProperties(clusterProperties.getConsumerProperties()));
    builder.producerProperties(convertProperties(clusterProperties.getProducerProperties()));
    builder.readOnly(clusterProperties.isReadOnly());
+    builder.exposeMetricsViaPrometheusEndpoint(exposeMetricsViaPrometheusEndpoint(clusterProperties));
    builder.masking(DataMasking.create(clusterProperties.getMasking()));
    builder.pollingSettings(PollingSettings.create(clusterProperties, properties));
+    builder.metricsScrapping(MetricsScraper.create(clusterProperties, jmxMetricsRetriever));
 
    if (schemaRegistryConfigured(clusterProperties)) {
      builder.schemaRegistryClient(schemaRegistryClient(clusterProperties));
@@ -74,16 +87,25 @@
    if (ksqlConfigured(clusterProperties)) {
      builder.ksqlClient(ksqlClient(clusterProperties));
    }
-    if (metricsConfigured(clusterProperties)) {
-      builder.metricsConfig(metricsConfigDataToMetricsConfig(clusterProperties.getMetrics()));
+    if (prometheusStorageConfigured(properties.getDefaultMetricsStorage())) {
+      builder.prometheusStorageClient(
+          prometheusStorageClient(properties.getDefaultMetricsStorage(), clusterProperties.getSsl())
+      );
+    }
+    if (prometheusStorageConfigured(clusterProperties)) {
+      builder.prometheusStorageClient(prometheusStorageClient(
+          clusterProperties.getMetrics().getStore(),
+          clusterProperties.getSsl())
+      );
    }
+
    builder.originalProperties(clusterProperties);
    return builder.build();
  }
 
  public Mono<ClusterConfigValidationDTO> validate(ClustersProperties.Cluster clusterProperties) {
    if (clusterProperties.getSsl() != null) {
-      Optional<String> errMsg = KafkaServicesValidation.validateTruststore(clusterProperties.getSsl());
+      Optional<String> errMsg = validateTruststore(clusterProperties.getSsl());
      if (errMsg.isPresent()) {
        return Mono.just(new ClusterConfigValidationDTO()
            .kafka(new ApplicationPropertyValidationDTO()
@@ -93,40 +115,49 @@
    }
 
    return Mono.zip(
-        KafkaServicesValidation.validateClusterConnection(
+        validateClusterConnection(
            clusterProperties.getBootstrapServers(),
            convertProperties(clusterProperties.getProperties()),
            clusterProperties.getSsl()
        ),
 
        schemaRegistryConfigured(clusterProperties)
-            ? KafkaServicesValidation.validateSchemaRegistry(
-                () -> schemaRegistryClient(clusterProperties)).map(Optional::of)
+            ? validateSchemaRegistry(() -> schemaRegistryClient(clusterProperties)).map(Optional::of)
            : Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
 
        ksqlConfigured(clusterProperties)
-            ? KafkaServicesValidation.validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
+            ? validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
            : Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
 
        connectClientsConfigured(clusterProperties)
-            ?
-            Flux.fromIterable(clusterProperties.getKafkaConnect())
-                .flatMap(c ->
-                    KafkaServicesValidation.validateConnect(() -> connectClient(clusterProperties, c))
-                        .map(r -> Tuples.of(c.getName(), r)))
-                .collectMap(Tuple2::getT1, Tuple2::getT2)
-                .map(Optional::of)
-            :
-            Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty())
+            ? Flux.fromIterable(clusterProperties.getKafkaConnect())
+                .flatMap(c ->
+                    KafkaServicesValidation.validateConnect(() -> connectClient(clusterProperties, c))
+                        .map(r -> Tuples.of(c.getName(), r)))
+                .collectMap(Tuple2::getT1, Tuple2::getT2)
+                .map(Optional::of)
+            : Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty()),
+
+        prometheusStorageConfigured(clusterProperties)
validatePrometheusStore(() -> prometheusStorageClient( + clusterProperties.getMetrics().getStore(), clusterProperties.getSsl())).map(Optional::of) + : Mono.>just(Optional.empty()) ).map(tuple -> { var validation = new ClusterConfigValidationDTO(); validation.kafka(tuple.getT1()); tuple.getT2().ifPresent(validation::schemaRegistry); tuple.getT3().ifPresent(validation::ksqldb); tuple.getT4().ifPresent(validation::kafkaConnects); + tuple.getT5().ifPresent(validation::prometheusStorage); return validation; }); } + private boolean exposeMetricsViaPrometheusEndpoint(ClustersProperties.Cluster clusterProperties) { + return Optional.ofNullable(clusterProperties.getMetrics()) + .map(m -> m.getPrometheusExpose() == null || m.getPrometheusExpose()) + .orElse(true); + } + private Properties convertProperties(Map propertiesMap) { Properties properties = new Properties(); if (propertiesMap != null) { @@ -135,6 +166,35 @@ private Properties convertProperties(Map propertiesMap) { return properties; } + private ReactiveFailover prometheusStorageClient( + ClustersProperties.MetricsStorage storage, ClustersProperties.TruststoreConfig ssl) { + WebClient webClient = new WebClientConfigurator() + .configureSsl(ssl, null) + .configureBufferSize(webClientMaxBuffSize) + .build(); + return ReactiveFailover.create( + parseUrlList(storage.getPrometheus().getUrl()), + url -> new PrometheusClientApi(new io.kafbat.ui.prometheus.ApiClient(webClient).setBasePath(url)), + ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER, + "No live Prometheus instances available", + ReactiveFailover.DEFAULT_RETRY_GRACE_PERIOD_MS + ); + } + + private boolean prometheusStorageConfigured(ClustersProperties.Cluster cluster) { + return Optional.ofNullable(cluster.getMetrics()) + .flatMap(m -> Optional.ofNullable(m.getStore())) + .map(this::prometheusStorageConfigured) + .orElse(false); + } + + private boolean prometheusStorageConfigured(ClustersProperties.MetricsStorage storage) { + return Optional.ofNullable(storage) + .flatMap(s -> Optional.ofNullable(s.getPrometheus())) + .map(p -> StringUtils.hasText(p.getUrl())) + .orElse(false); + } + private boolean connectClientsConfigured(ClustersProperties.Cluster clusterProperties) { return clusterProperties.getKafkaConnect() != null; } @@ -206,25 +266,4 @@ private ReactiveFailover ksqlClient(ClustersProperties.Cluster cl private List parseUrlList(String url) { return Stream.of(url.split(",")).map(String::trim).filter(s -> !s.isBlank()).toList(); } - - private boolean metricsConfigured(ClustersProperties.Cluster clusterProperties) { - return clusterProperties.getMetrics() != null; - } - - @Nullable - private MetricsConfig metricsConfigDataToMetricsConfig(ClustersProperties.MetricsConfigData metricsConfigData) { - if (metricsConfigData == null) { - return null; - } - MetricsConfig.MetricsConfigBuilder builder = MetricsConfig.builder(); - builder.type(metricsConfigData.getType()); - builder.port(metricsConfigData.getPort()); - builder.ssl(Optional.ofNullable(metricsConfigData.getSsl()).orElse(false)); - builder.username(metricsConfigData.getUsername()); - builder.password(metricsConfigData.getPassword()); - builder.keystoreLocation(metricsConfigData.getKeystoreLocation()); - builder.keystorePassword(metricsConfigData.getKeystorePassword()); - return builder.build(); - } - } diff --git a/api/src/main/java/io/kafbat/ui/service/KafkaConnectService.java b/api/src/main/java/io/kafbat/ui/service/KafkaConnectService.java index c1aefcd4f..b3e5aa058 100644 --- 
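prometheusStorageClient(..) wraps every URL from the comma-separated Prometheus URL setting into a single failover client. An assumed usage sketch of the resulting wrapper; the URLs are made up, and the ReactiveFailover package location is inferred from this PR's other imports:

    package io.kafbat.ui.service;

    import io.kafbat.ui.prometheus.api.PrometheusClientApi;
    import io.kafbat.ui.prometheus.model.QueryResponse;
    import io.kafbat.ui.util.ReactiveFailover;   // assumed location
    import java.util.List;
    import org.springframework.web.reactive.function.client.WebClient;
    import reactor.core.publisher.Mono;

    // Assumed usage sketch: one PrometheusClientApi per configured URL; mono(..)
    // runs the call against the current instance and fails over on refusal.
    class FailoverSketch {
      Mono<QueryResponse> firingQuery(WebClient webClient) {
        var failover = ReactiveFailover.create(
            List.of("http://prometheus-1:9090", "http://prometheus-2:9090"),  // made-up URLs
            url -> new PrometheusClientApi(new io.kafbat.ui.prometheus.ApiClient(webClient).setBasePath(url)),
            ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
            "No live Prometheus instances available",
            ReactiveFailover.DEFAULT_RETRY_GRACE_PERIOD_MS);
        return failover.mono(client -> client.query("up", null, null));
      }
    }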
a/api/src/main/java/io/kafbat/ui/service/KafkaConnectService.java +++ b/api/src/main/java/io/kafbat/ui/service/KafkaConnectService.java @@ -1,5 +1,6 @@ package io.kafbat.ui.service; +import static org.apache.commons.lang3.Strings.CI; import com.github.benmanes.caffeine.cache.AsyncCache; import com.github.benmanes.caffeine.cache.Caffeine; @@ -35,7 +36,6 @@ import java.util.stream.Stream; import javax.annotation.Nullable; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Service; import org.springframework.web.reactive.function.client.WebClientResponseException; import reactor.core.publisher.Flux; @@ -159,7 +159,7 @@ private Predicate matchesSearchTerm(@Nullable final String return c -> true; } return connector -> getStringsForSearch(connector) - .anyMatch(string -> StringUtils.containsIgnoreCase(string, search)); + .anyMatch(string -> CI.contains(string, search)); } private Stream getStringsForSearch(FullConnectorInfoDTO fullConnectorInfo) { diff --git a/api/src/main/java/io/kafbat/ui/service/MessagesService.java b/api/src/main/java/io/kafbat/ui/service/MessagesService.java index ad9244b40..3bf3d8330 100644 --- a/api/src/main/java/io/kafbat/ui/service/MessagesService.java +++ b/api/src/main/java/io/kafbat/ui/service/MessagesService.java @@ -198,8 +198,13 @@ private Mono sendMessageImpl(KafkaCluster cluster, public static KafkaProducer createProducer(KafkaCluster cluster, Map additionalProps) { + return createProducer(cluster.getOriginalProperties(), additionalProps); + } + + public static KafkaProducer createProducer(ClustersProperties.Cluster cluster, + Map additionalProps) { Properties properties = new Properties(); - KafkaClientSslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties); + KafkaClientSslPropertiesUtil.addKafkaSslProperties(cluster.getSsl(), properties); properties.putAll(cluster.getProperties()); properties.putAll(cluster.getProducerProperties()); properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); diff --git a/api/src/main/java/io/kafbat/ui/service/PollingCursorsStorage.java b/api/src/main/java/io/kafbat/ui/service/PollingCursorsStorage.java index 2b760f010..206eeaef7 100644 --- a/api/src/main/java/io/kafbat/ui/service/PollingCursorsStorage.java +++ b/api/src/main/java/io/kafbat/ui/service/PollingCursorsStorage.java @@ -32,7 +32,7 @@ public Optional getCursor(String id) { } public String register(Cursor cursor) { - var id = RandomStringUtils.random(8, true, true); + var id = RandomStringUtils.secure().next(8, true, true); cursorsCache.put(id, cursor); return id; } diff --git a/api/src/main/java/io/kafbat/ui/service/ReactiveAdminClient.java b/api/src/main/java/io/kafbat/ui/service/ReactiveAdminClient.java index 0e0277c0c..0efe5e827 100644 --- a/api/src/main/java/io/kafbat/ui/service/ReactiveAdminClient.java +++ b/api/src/main/java/io/kafbat/ui/service/ReactiveAdminClient.java @@ -137,6 +137,10 @@ public static class ClusterDescription { Collection nodes; @Nullable // null, if ACL is disabled Set authorizedOperations; + + public static ClusterDescription empty() { + return new ReactiveAdminClient.ClusterDescription(null, null, List.of(), Set.of()); + } } @Builder diff --git a/api/src/main/java/io/kafbat/ui/service/StatisticsCache.java b/api/src/main/java/io/kafbat/ui/service/StatisticsCache.java index 134b723f4..04306e9e8 100644 --- a/api/src/main/java/io/kafbat/ui/service/StatisticsCache.java +++ 
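Two library migrations in these hunks deserve a note: recent commons-lang3 deprecates StringUtils.containsIgnoreCase in favour of the Strings.CI singleton, and the plain RandomStringUtils.random(..) in favour of the SecureRandom-backed secure() factory. In isolation:

    import static org.apache.commons.lang3.Strings.CI;

    import org.apache.commons.lang3.RandomStringUtils;

    class CommonsLang3MigrationDemo {
      public static void main(String[] args) {
        // Strings.CI bundles the case-insensitive comparisons:
        System.out.println(CI.contains("MySinkConnector", "sink"));   // true
        // secure() returns a SecureRandom-backed generator; next(8, true, true)
        // produces an 8-char alphanumeric id, e.g. "a3F9xQ2b":
        System.out.println(RandomStringUtils.secure().next(8, true, true));
      }
    }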
b/api/src/main/java/io/kafbat/ui/service/StatisticsCache.java
@@ -1,9 +1,8 @@
 package io.kafbat.ui.service;

+import io.kafbat.ui.model.InternalPartitionsOffsets;
 import io.kafbat.ui.model.KafkaCluster;
-import io.kafbat.ui.model.ServerStatusDTO;
 import io.kafbat.ui.model.Statistics;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -18,7 +17,7 @@ public class StatisticsCache {

   private final Map<String, Statistics> cache = new ConcurrentHashMap<>();

   public StatisticsCache(ClustersStorage clustersStorage) {
-    var initializing = Statistics.empty().toBuilder().status(ServerStatusDTO.INITIALIZING).build();
+    Statistics initializing = Statistics.initializing();
     clustersStorage.getKafkaClusters().forEach(c -> cache.put(c.getName(), initializing));
   }
@@ -28,38 +27,25 @@ public synchronized void replace(KafkaCluster c, Statistics stats) {

   public synchronized void update(KafkaCluster c,
                                   Map<String, TopicDescription> descriptions,
-                                  Map<String, List<ConfigEntry>> configs) {
-    var metrics = get(c);
-    var updatedDescriptions = new HashMap<>(metrics.getTopicDescriptions());
-    updatedDescriptions.putAll(descriptions);
-    var updatedConfigs = new HashMap<>(metrics.getTopicConfigs());
-    updatedConfigs.putAll(configs);
+                                  Map<String, List<ConfigEntry>> configs,
+                                  InternalPartitionsOffsets partitionsOffsets) {
+    var stats = get(c);
     replace(
         c,
-        metrics.toBuilder()
-            .topicDescriptions(updatedDescriptions)
-            .topicConfigs(updatedConfigs)
-            .build()
+        stats.withClusterState(s -> s.updateTopics(descriptions, configs, partitionsOffsets))
     );
   }

   public synchronized void onTopicDelete(KafkaCluster c, String topic) {
-    var metrics = get(c);
-    var updatedDescriptions = new HashMap<>(metrics.getTopicDescriptions());
-    updatedDescriptions.remove(topic);
-    var updatedConfigs = new HashMap<>(metrics.getTopicConfigs());
-    updatedConfigs.remove(topic);
+    var stats = get(c);
     replace(
         c,
-        metrics.toBuilder()
-            .topicDescriptions(updatedDescriptions)
-            .topicConfigs(updatedConfigs)
-            .build()
+        stats.withClusterState(s -> s.topicDeleted(topic))
     );
   }

   public Statistics get(KafkaCluster c) {
-    return Objects.requireNonNull(cache.get(c.getName()), "Unknown cluster metrics requested");
+    return Objects.requireNonNull(cache.get(c.getName()), "Statistics for unknown cluster requested");
   }
 }
diff --git a/api/src/main/java/io/kafbat/ui/service/StatisticsService.java b/api/src/main/java/io/kafbat/ui/service/StatisticsService.java
index bc4ce6a0b..9e302596e 100644
--- a/api/src/main/java/io/kafbat/ui/service/StatisticsService.java
+++ b/api/src/main/java/io/kafbat/ui/service/StatisticsService.java
@@ -3,20 +3,14 @@
 import static io.kafbat.ui.service.ReactiveAdminClient.ClusterDescription;

 import io.kafbat.ui.model.ClusterFeature;
-import io.kafbat.ui.model.InternalLogDirStats;
 import io.kafbat.ui.model.KafkaCluster;
 import io.kafbat.ui.model.Metrics;
 import io.kafbat.ui.model.ServerStatusDTO;
 import io.kafbat.ui.model.Statistics;
-import io.kafbat.ui.service.metrics.MetricsCollector;
+import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState;
 import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.TopicDescription;
-import org.apache.kafka.common.Node;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Mono;

@@ -25,7 +19,6 @@
 @Slf4j
 public class StatisticsService {

-  private final MetricsCollector metricsCollector;
   private final AdminClientService
adminClientService; private final FeatureService featureService; private final StatisticsCache cache; @@ -37,44 +30,46 @@ public Mono updateCache(KafkaCluster c) { @SuppressWarnings("unchecked") private Mono getStatistics(KafkaCluster cluster) { return adminClientService.get(cluster).flatMap(ac -> - ac.describeCluster().flatMap(description -> - ac.updateInternalStats(description.getController()).then( - Mono.zip( - List.of( - metricsCollector.getBrokerMetrics(cluster, description.getNodes()), - getLogDirInfo(description, ac), + ac.describeCluster() + .flatMap(description -> + ac.updateInternalStats(description.getController()) + .then( + Mono.zip( featureService.getAvailableFeatures(ac, cluster, description), - loadTopicConfigs(cluster), - describeTopics(cluster)), - results -> - Statistics.builder() - .status(ServerStatusDTO.ONLINE) - .clusterDescription(description) - .version(ac.getVersion()) - .metrics((Metrics) results[0]) - .logDirInfo((InternalLogDirStats) results[1]) - .features((List) results[2]) - .topicConfigs((Map>) results[3]) - .topicDescriptions((Map) results[4]) - .build() - )))) - .doOnError(e -> - log.error("Failed to collect cluster {} info", cluster.getName(), e)) - .onErrorResume( - e -> Mono.just(Statistics.empty().toBuilder().lastKafkaException(e).build())); + loadClusterState(description, ac) + ).flatMap(t -> + scrapeMetrics(cluster, t.getT2(), description) + .map(metrics -> createStats(description, t.getT1(), t.getT2(), metrics, ac))))) + .doOnError(e -> + log.error("Failed to collect cluster {} info", cluster.getName(), e)) + .onErrorResume(t -> Mono.just(Statistics.statsUpdateError(t)))); } - private Mono getLogDirInfo(ClusterDescription desc, ReactiveAdminClient ac) { - var brokerIds = desc.getNodes().stream().map(Node::id).collect(Collectors.toSet()); - return ac.describeLogDirs(brokerIds).map(InternalLogDirStats::new); + private Statistics createStats(ClusterDescription description, + List features, + ScrapedClusterState scrapedClusterState, + Metrics metrics, + ReactiveAdminClient ac) { + return Statistics.builder() + .status(ServerStatusDTO.ONLINE) + .clusterDescription(description) + .version(ac.getVersion()) + .metrics(metrics) + .features(features) + .clusterState(scrapedClusterState) + .build(); } - private Mono> describeTopics(KafkaCluster c) { - return adminClientService.get(c).flatMap(ReactiveAdminClient::describeTopics); + private Mono loadClusterState(ClusterDescription clusterDescription, + ReactiveAdminClient ac) { + return ScrapedClusterState.scrape(clusterDescription, ac); } - private Mono>> loadTopicConfigs(KafkaCluster c) { - return adminClientService.get(c).flatMap(ReactiveAdminClient::getTopicsConfig); + private Mono scrapeMetrics(KafkaCluster cluster, + ScrapedClusterState clusterState, + ClusterDescription clusterDescription) { + return cluster.getMetricsScrapping() + .scrape(clusterState, clusterDescription.getNodes()); } } diff --git a/api/src/main/java/io/kafbat/ui/service/TopicsService.java b/api/src/main/java/io/kafbat/ui/service/TopicsService.java index 04dd2d593..becfd55ad 100644 --- a/api/src/main/java/io/kafbat/ui/service/TopicsService.java +++ b/api/src/main/java/io/kafbat/ui/service/TopicsService.java @@ -10,7 +10,6 @@ import io.kafbat.ui.exception.TopicRecreationException; import io.kafbat.ui.exception.ValidationException; import io.kafbat.ui.model.ClusterFeature; -import io.kafbat.ui.model.InternalLogDirStats; import io.kafbat.ui.model.InternalPartition; import io.kafbat.ui.model.InternalPartitionsOffsets; import 
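Condensed, the new collection flow in this class reads as follows. This is an abbreviated restatement of getStatistics(..) with the zip flattened out; it drops updateInternalStats(..) for brevity, otherwise the names are the PR's own.

    package io.kafbat.ui.service;

    import io.kafbat.ui.model.KafkaCluster;
    import io.kafbat.ui.model.ServerStatusDTO;
    import io.kafbat.ui.model.Statistics;
    import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState;
    import lombok.RequiredArgsConstructor;
    import reactor.core.publisher.Mono;

    // Abbreviated restatement of the pipeline: describe the cluster, load
    // per-topic state, scrape metrics against that state, fold into Statistics.
    @RequiredArgsConstructor
    class StatisticsFlowSketch {

      private final FeatureService featureService;

      Mono<Statistics> refresh(KafkaCluster cluster, ReactiveAdminClient ac) {
        return ac.describeCluster()
            .flatMap(description -> Mono.zip(
                    featureService.getAvailableFeatures(ac, cluster, description),
                    ScrapedClusterState.scrape(description, ac))
                .flatMap(t -> cluster.getMetricsScrapping()
                    .scrape(t.getT2(), description.getNodes())
                    .map(metrics -> Statistics.builder()
                        .status(ServerStatusDTO.ONLINE)
                        .clusterDescription(description)
                        .version(ac.getVersion())
                        .metrics(metrics)
                        .features(t.getT1())
                        .clusterState(t.getT2())
                        .build())))
            .onErrorResume(th -> Mono.just(Statistics.statsUpdateError(th)));
      }
    }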
io.kafbat.ui.model.InternalReplica; @@ -25,6 +24,8 @@ import io.kafbat.ui.model.Statistics; import io.kafbat.ui.model.TopicCreationDTO; import io.kafbat.ui.model.TopicUpdateDTO; +import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState; +import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.TopicState; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; @@ -73,20 +74,19 @@ public Mono> loadTopics(KafkaCluster c, List topics) return adminClientService.get(c) .flatMap(ac -> ac.describeTopics(topics).zipWith(ac.getTopicsConfig(topics, false), - (descriptions, configs) -> { - statisticsCache.update(c, descriptions, configs); - return getPartitionOffsets(descriptions, ac).map(offsets -> { - var metrics = statisticsCache.get(c); - return createList( - topics, - descriptions, - configs, - offsets, - metrics.getMetrics(), - metrics.getLogDirInfo() - ); - }); - })).flatMap(Function.identity()); + (descriptions, configs) -> + getPartitionOffsets(descriptions, ac).map(offsets -> { + statisticsCache.update(c, descriptions, configs, offsets); + var stats = statisticsCache.get(c); + return createList( + topics, + descriptions, + configs, + offsets, + stats.getMetrics(), + stats.getClusterState() + ); + }))).flatMap(Function.identity()); } private Mono loadTopic(KafkaCluster c, String topicName) { @@ -124,7 +124,7 @@ private List createList(List orderedNames, Map> configs, InternalPartitionsOffsets partitionsOffsets, Metrics metrics, - InternalLogDirStats logDirInfo) { + ScrapedClusterState clusterState) { return orderedNames.stream() .filter(descriptions::containsKey) .map(t -> InternalTopic.from( @@ -132,7 +132,10 @@ private List createList(List orderedNames, configs.getOrDefault(t, List.of()), partitionsOffsets, metrics, - logDirInfo, + Optional.ofNullable(clusterState.getTopicStates().get(t)).map(TopicState::segmentStats) + .orElse(null), + Optional.ofNullable(clusterState.getTopicStates().get(t)).map(TopicState::partitionsSegmentStats) + .orElse(Map.of()), clustersProperties.getInternalTopicPrefix() )) .collect(toList()); @@ -225,7 +228,8 @@ private Mono updateTopic(KafkaCluster cluster, .then(loadTopic(cluster, topicName))); } - public Mono updateTopic(KafkaCluster cl, String topicName, Mono topicUpdate) { + public Mono updateTopic(KafkaCluster cl, String topicName, + Mono topicUpdate) { return topicUpdate .flatMap(t -> updateTopic(cl, topicName, t)); } @@ -463,19 +467,21 @@ public Mono cloneTopic( public Mono> getTopicsForPagination(KafkaCluster cluster) { Statistics stats = statisticsCache.get(cluster); - return filterExisting(cluster, stats.getTopicDescriptions().keySet()) + Map topicStates = stats.getClusterState().getTopicStates(); + return filterExisting(cluster, topicStates.keySet()) .map(lst -> lst.stream() .map(topicName -> InternalTopic.from( - stats.getTopicDescriptions().get(topicName), - stats.getTopicConfigs().getOrDefault(topicName, List.of()), + topicStates.get(topicName).description(), + topicStates.get(topicName).configs(), InternalPartitionsOffsets.empty(), stats.getMetrics(), - stats.getLogDirInfo(), + Optional.ofNullable(topicStates.get(topicName)) + .map(TopicState::segmentStats).orElse(null), + Optional.ofNullable(topicStates.get(topicName)) + .map(TopicState::partitionsSegmentStats).orElse(null), clustersProperties.getInternalTopicPrefix() - )) - .collect(toList()) - ); + )).collect(toList())); } public Mono>> getActiveProducersState(KafkaCluster cluster, String topic) { diff --git 
a/api/src/main/java/io/kafbat/ui/service/graphs/GraphDescription.java b/api/src/main/java/io/kafbat/ui/service/graphs/GraphDescription.java
new file mode 100644
index 000000000..e11eab014
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/graphs/GraphDescription.java
@@ -0,0 +1,25 @@
+package io.kafbat.ui.service.graphs;
+
+import java.time.Duration;
+import java.util.Set;
+import javax.annotation.Nullable;
+import lombok.Builder;
+
+@Builder
+public record GraphDescription(String id,
+                               @Nullable Duration defaultInterval, //null for instant queries, set for range
+                               String prometheusQuery,
+                               Set<String> params) {
+
+  public static GraphDescriptionBuilder instant() {
+    return builder();
+  }
+
+  public static GraphDescriptionBuilder range(Duration defaultInterval) {
+    return builder().defaultInterval(defaultInterval);
+  }
+
+  public boolean isRange() {
+    return defaultInterval != null;
+  }
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/graphs/GraphDescriptions.java b/api/src/main/java/io/kafbat/ui/service/graphs/GraphDescriptions.java
new file mode 100644
index 000000000..4645739e6
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/graphs/GraphDescriptions.java
@@ -0,0 +1,74 @@
+package io.kafbat.ui.service.graphs;
+
+import static java.util.stream.Collectors.toMap;
+
+import io.kafbat.ui.exception.ValidationException;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Stream;
+import org.springframework.stereotype.Component;
+
+@Component
+class GraphDescriptions {
+
+  private static final Duration DEFAULT_RANGE_DURATION = Duration.ofDays(7);
+
+  private final Map<String, GraphDescription> graphsById;
+
+  GraphDescriptions() {
+    validate();
+    this.graphsById = PREDEFINED_GRAPHS.stream().collect(toMap(GraphDescription::id, d -> d));
+  }
+
+  Optional<GraphDescription> getById(String id) {
+    return Optional.ofNullable(graphsById.get(id));
+  }
+
+  Stream<GraphDescription> all() {
+    return graphsById.values().stream();
+  }
+
+  private void validate() {
+    Map<String, String> errors = new HashMap<>();
+    for (GraphDescription description : PREDEFINED_GRAPHS) {
+      new PromQueryTemplate(description)
+          .validateSyntax()
+          .ifPresent(err -> errors.put(description.id(), err));
+    }
+    if (!errors.isEmpty()) {
+      throw new ValidationException("Error validating queries for following graphs: " + errors);
+    }
+  }
+
+  private static final List<GraphDescription> PREDEFINED_GRAPHS = List.of(
+
+      GraphDescription.range(DEFAULT_RANGE_DURATION)
+          .id("broker_bytes_disk_ts")
+          .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"}")
+          .params(Set.of())
+          .build(),
+
+      GraphDescription.instant()
+          .id("broker_bytes_disk")
+          .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"}")
+          .params(Set.of())
+          .build(),
+
+      GraphDescription.instant()
+          .id("kafka_topic_partition_current_offset")
+          .prometheusQuery("kafka_topic_partition_current_offset{cluster=\"${cluster}\"}")
+          .params(Set.of())
+          .build(),
+
+      GraphDescription.range(DEFAULT_RANGE_DURATION)
+          .id("kafka_topic_partition_current_offset_per_topic_ts")
+          .prometheusQuery("kafka_topic_partition_current_offset{cluster=\"${cluster}\",topic = \"${topic}\"}")
+          .params(Set.of("topic"))
+          .build()
+  );
+
+}
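For a registry entry beyond the predefined four, a hypothetical extra declaration (not part of this PR) shows how a parameterised range graph would look; every ${param} other than ${cluster} must be listed in params so that getQuery(..) can reject calls with missing values.

    import io.kafbat.ui.service.graphs.GraphDescription;
    import java.time.Duration;
    import java.util.Set;

    // Hypothetical entry for illustration only; the metric name and id are made up.
    class CustomGraphExample {
      static final GraphDescription BYTES_PER_BROKER =
          GraphDescription.range(Duration.ofDays(7))
              .id("broker_bytes_disk_by_broker_ts")
              .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\",broker_id=\"${broker_id}\"}")
              .params(Set.of("broker_id"))
              .build();
    }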
diff --git a/api/src/main/java/io/kafbat/ui/service/graphs/GraphsService.java b/api/src/main/java/io/kafbat/ui/service/graphs/GraphsService.java
new file mode 100644
index 000000000..451feb1da
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/graphs/GraphsService.java
@@ -0,0 +1,95 @@
+package io.kafbat.ui.service.graphs;
+
+import com.google.common.base.Preconditions;
+import io.kafbat.ui.exception.NotFoundException;
+import io.kafbat.ui.exception.ValidationException;
+import io.kafbat.ui.model.KafkaCluster;
+import io.kafbat.ui.prometheus.api.PrometheusClientApi;
+import io.kafbat.ui.prometheus.model.QueryResponse;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import org.springframework.stereotype.Component;
+import reactor.core.publisher.Mono;
+
+@Component
+@RequiredArgsConstructor
+public class GraphsService {
+
+  private static final int TARGET_MATRIX_DATA_POINTS = 200;
+
+  private final GraphDescriptions graphDescriptions;
+
+  public Mono<QueryResponse> getGraphData(KafkaCluster cluster,
+                                          String id,
+                                          @Nullable Instant from,
+                                          @Nullable Instant to,
+                                          @Nullable Map<String, String> params) {
+
+    var graph = graphDescriptions.getById(id)
+        .orElseThrow(() -> new NotFoundException("No graph found with id = " + id));
+
+    var promClient = cluster.getPrometheusStorageClient();
+    if (promClient == null) {
+      throw new ValidationException("Prometheus not configured for cluster");
+    }
+    String preparedQuery = prepareQuery(graph, cluster.getName(), params);
+    return cluster.getPrometheusStorageClient()
+        .mono(client -> {
+          if (graph.isRange()) {
+            return queryRange(client, preparedQuery, graph.defaultInterval(), from, to);
+          }
+          return queryInstant(client, preparedQuery);
+        });
+  }
+
+  private Mono<QueryResponse> queryRange(PrometheusClientApi c,
+                                         String preparedQuery,
+                                         Duration defaultPeriod,
+                                         @Nullable Instant from,
+                                         @Nullable Instant to) {
+    if (from == null) {
+      from = Instant.now().minus(defaultPeriod);
+    }
+    if (to == null) {
+      to = Instant.now();
+    }
+    Preconditions.checkArgument(to.isAfter(from));
+    return c.queryRange(
+        preparedQuery,
+        String.valueOf(from.getEpochSecond()),
+        String.valueOf(to.getEpochSecond()),
+        calculateStepSize(from, to),
+        null
+    );
+  }
+
+  private String calculateStepSize(Instant from, Instant to) {
+    long intervalInSecs = to.getEpochSecond() - from.getEpochSecond();
+    if (intervalInSecs <= TARGET_MATRIX_DATA_POINTS) {
+      return intervalInSecs + "s";
+    }
+    int step = ((int) (((double) intervalInSecs) / TARGET_MATRIX_DATA_POINTS));
+    return step + "s";
+  }
+
+  private Mono<QueryResponse> queryInstant(PrometheusClientApi c, String preparedQuery) {
+    return c.query(preparedQuery, null, null);
+  }
+
+  private String prepareQuery(GraphDescription d, String clusterName, @Nullable Map<String, String> params) {
+    return new PromQueryTemplate(d).getQuery(clusterName, Optional.ofNullable(params).orElse(Map.of()));
+  }
+
+  public Stream<GraphDescription> getGraphs(KafkaCluster cluster) {
+    if (cluster.getPrometheusStorageClient() == null) {
+      return Stream.empty();
+    }
+    return graphDescriptions.all();
+  }
+
+}
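Prometheus range queries require an explicit step, and calculateStepSize(..) above targets roughly 200 data points so chart payloads stay small regardless of the window. Worked through for the default 7-day window: 604,800 seconds divided by 200 gives a 3,024-second step. The same arithmetic, standalone:

    import java.time.Duration;
    import java.time.Instant;

    // Worked example of the step-size heuristic from GraphsService above.
    public class StepSizeDemo {
      static final int TARGET_MATRIX_DATA_POINTS = 200;

      static String calculateStepSize(Instant from, Instant to) {
        long intervalInSecs = to.getEpochSecond() - from.getEpochSecond();
        if (intervalInSecs <= TARGET_MATRIX_DATA_POINTS) {
          return intervalInSecs + "s";   // short windows: one point per second
        }
        return (int) ((double) intervalInSecs / TARGET_MATRIX_DATA_POINTS) + "s";
      }

      public static void main(String[] args) {
        Instant to = Instant.now();
        Instant from = to.minus(Duration.ofDays(7));   // 604_800 seconds
        System.out.println(calculateStepSize(from, to));   // prints "3024s"
      }
    }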
diff --git a/api/src/main/java/io/kafbat/ui/service/graphs/PromQueryLangGrammar.java b/api/src/main/java/io/kafbat/ui/service/graphs/PromQueryLangGrammar.java
new file mode 100644
index 000000000..e8959c571
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/graphs/PromQueryLangGrammar.java
@@ -0,0 +1,37 @@
+package io.kafbat.ui.service.graphs;
+
+import java.util.Optional;
+import org.antlr.v4.runtime.BailErrorStrategy;
+import org.antlr.v4.runtime.CharStreams;
+import org.antlr.v4.runtime.CommonTokenStream;
+import org.antlr.v4.runtime.misc.ParseCancellationException;
+import promql.PromQLLexer;
+import promql.PromQLParser;
+
+class PromQueryLangGrammar {
+
+  private PromQueryLangGrammar() {
+  }
+
+  // returns error msg, or empty if query is valid
+  static Optional<String> validateExpression(String query) {
+    try {
+      parseExpression(query);
+      return Optional.empty();
+    } catch (ParseCancellationException e) {
+      return Optional.of("PromQL syntax error, " + e.getMessage());
+    }
+  }
+
+  static PromQLParser.ExpressionContext parseExpression(String query) {
+    return createParser(query).expression();
+  }
+
+  private static PromQLParser createParser(String str) {
+    var parser = new PromQLParser(new CommonTokenStream(new PromQLLexer(CharStreams.fromString(str))));
+    parser.removeErrorListeners();
+    parser.setErrorHandler(new BailErrorStrategy());
+    return parser;
+  }
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/graphs/PromQueryTemplate.java b/api/src/main/java/io/kafbat/ui/service/graphs/PromQueryTemplate.java
new file mode 100644
index 000000000..060b7424e
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/graphs/PromQueryTemplate.java
@@ -0,0 +1,51 @@
+package io.kafbat.ui.service.graphs;
+
+import com.google.common.collect.Sets;
+import io.kafbat.ui.exception.ValidationException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import org.apache.commons.text.StringSubstitutor;
+
+class PromQueryTemplate {
+
+  private static final String CLUSTER_LABEL_NAME = "cluster";
+
+  private final String queryTemplate;
+  private final Set<String> paramsNames;
+
+  PromQueryTemplate(GraphDescription d) {
+    this(d.prometheusQuery(), d.params());
+  }
+
+  PromQueryTemplate(String templateQueryString, Set<String> paramsNames) {
+    this.queryTemplate = templateQueryString;
+    this.paramsNames = paramsNames;
+  }
+
+  String getQuery(String clusterName, Map<String, String> paramValues) {
+    var missingParams = Sets.difference(paramsNames, paramValues.keySet());
+    if (!missingParams.isEmpty()) {
+      throw new ValidationException("Not all params set for query, missing: " + missingParams);
+    }
+    Map<String, String> replacements = new HashMap<>(paramValues);
+    replacements.put(CLUSTER_LABEL_NAME, clusterName);
+    return replaceParams(replacements);
+  }
+
+  // returns error msg or empty if no errors found
+  Optional<String> validateSyntax() {
+    Map<String, String> fakeReplacements = new HashMap<>();
+    fakeReplacements.put(CLUSTER_LABEL_NAME, "1");
+    paramsNames.forEach(paramName -> fakeReplacements.put(paramName, "1"));
+
+    String prepared = replaceParams(fakeReplacements);
+    return PromQueryLangGrammar.validateExpression(prepared);
+  }
+
+  private String replaceParams(Map<String, String> replacements) {
+    return new StringSubstitutor(replacements).replace(queryTemplate);
+  }
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/integration/odd/TopicsExporter.java b/api/src/main/java/io/kafbat/ui/service/integration/odd/TopicsExporter.java
index 641a4a32a..d34e0a613 100644
--- a/api/src/main/java/io/kafbat/ui/service/integration/odd/TopicsExporter.java
+++ b/api/src/main/java/io/kafbat/ui/service/integration/odd/TopicsExporter.java
@@ -2,9 +2,10 @@

 import com.google.common.collect.ImmutableMap;
 import io.kafbat.ui.model.KafkaCluster;
-import io.kafbat.ui.model.Statistics;
 import io.kafbat.ui.service.StatisticsCache;
 import io.kafbat.ui.service.integration.odd.schema.DataSetFieldsExtractors;
+import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState;
+import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.TopicState;
 import io.kafbat.ui.sr.model.SchemaSubject;
 import java.net.URI;
 import java.util.List;
@@ -37,10 +38,10 @@ class TopicsExporter {

   Flux<DataEntityList> export(KafkaCluster cluster) {
     String
clusterOddrn = Oddrn.clusterOddrn(cluster); - Statistics stats = statisticsCache.get(cluster); - return Flux.fromIterable(stats.getTopicDescriptions().keySet()) + var clusterState = statisticsCache.get(cluster).getClusterState(); + return Flux.fromIterable(clusterState.getTopicStates().keySet()) .filter(topicFilter) - .flatMap(topic -> createTopicDataEntity(cluster, topic, stats)) + .flatMap(topic -> createTopicDataEntity(cluster, topic, clusterState.getTopicStates().get(topic))) .onErrorContinue( (th, topic) -> log.warn("Error exporting data for topic {}, cluster {}", topic, cluster.getName(), th)) .buffer(100) @@ -50,7 +51,7 @@ Flux export(KafkaCluster cluster) { .items(topicsEntities)); } - private Mono createTopicDataEntity(KafkaCluster cluster, String topic, Statistics stats) { + private Mono createTopicDataEntity(KafkaCluster cluster, String topic, TopicState topicState) { KafkaPath topicOddrnPath = Oddrn.topicOddrnPath(cluster, topic); return Mono.zip( @@ -70,13 +71,13 @@ private Mono createTopicDataEntity(KafkaCluster cluster, String topi .addMetadataItem( new MetadataExtension() .schemaUrl(URI.create("wontbeused.oops")) - .metadata(getTopicMetadata(topic, stats))); + .metadata(getTopicMetadata(topicState))); } ); } - private Map getNonDefaultConfigs(String topic, Statistics stats) { - List config = stats.getTopicConfigs().get(topic); + private Map getNonDefaultConfigs(TopicState topicState) { + List config = topicState.configs(); if (config == null) { return Map.of(); } @@ -85,12 +86,12 @@ private Map getNonDefaultConfigs(String topic, Statistics stats) .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)); } - private Map getTopicMetadata(String topic, Statistics stats) { - TopicDescription topicDescription = stats.getTopicDescriptions().get(topic); + private Map getTopicMetadata(TopicState topicState) { + TopicDescription topicDescription = topicState.description(); return ImmutableMap.builder() .put("partitions", topicDescription.partitions().size()) .put("replication_factor", topicDescription.partitions().get(0).replicas().size()) - .putAll(getNonDefaultConfigs(topic, stats)) + .putAll(getNonDefaultConfigs(topicState)) .build(); } diff --git a/api/src/main/java/io/kafbat/ui/service/masking/policies/Mask.java b/api/src/main/java/io/kafbat/ui/service/masking/policies/Mask.java index 97913164d..5a7210381 100644 --- a/api/src/main/java/io/kafbat/ui/service/masking/policies/Mask.java +++ b/api/src/main/java/io/kafbat/ui/service/masking/policies/Mask.java @@ -7,6 +7,7 @@ import com.fasterxml.jackson.databind.node.TextNode; import com.google.common.base.Preconditions; import java.util.List; +import java.util.Map; import java.util.function.UnaryOperator; class Mask extends MaskingPolicy { @@ -54,15 +55,15 @@ private static UnaryOperator createMasker(List maskingChars) { private JsonNode maskWithFieldsCheck(JsonNode node) { if (node.isObject()) { ObjectNode obj = ((ObjectNode) node).objectNode(); - node.fields().forEachRemaining(f -> { - String fieldName = f.getKey(); - JsonNode fieldVal = f.getValue(); + for (Map.Entry property : node.properties()) { + String fieldName = property.getKey(); + JsonNode fieldVal = property.getValue(); if (fieldShouldBeMasked(fieldName)) { obj.set(fieldName, maskNodeRecursively(fieldVal)); } else { obj.set(fieldName, maskWithFieldsCheck(fieldVal)); } - }); + } return obj; } else if (node.isArray()) { ArrayNode arr = ((ArrayNode) node).arrayNode(node.size()); @@ -75,7 +76,9 @@ private JsonNode maskWithFieldsCheck(JsonNode node) { private 
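The masking policies in this PR swap Jackson's fields().forEachRemaining(..) for the properties() set view, which allows a plain for-loop. A minimal demo, assuming Jackson 2.15+ where JsonNode.properties() returns Set<Map.Entry<String, JsonNode>>:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.Map;

    class PropertiesIterationDemo {
      public static void main(String[] args) throws Exception {
        JsonNode node = new ObjectMapper().readTree("{\"card\":\"1234-5678\",\"note\":\"ok\"}");
        // properties() exposes the object's fields as a Set, so no iterator callback is needed:
        for (Map.Entry<String, JsonNode> property : node.properties()) {
          System.out.println(property.getKey() + " -> " + property.getValue());
        }
      }
    }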
JsonNode maskNodeRecursively(JsonNode node) { if (node.isObject()) { ObjectNode obj = ((ObjectNode) node).objectNode(); - node.fields().forEachRemaining(f -> obj.set(f.getKey(), maskNodeRecursively(f.getValue()))); + for (Map.Entry property : node.properties()) { + obj.set(property.getKey(), maskNodeRecursively(property.getValue())); + } return obj; } else if (node.isArray()) { ArrayNode arr = ((ArrayNode) node).arrayNode(node.size()); diff --git a/api/src/main/java/io/kafbat/ui/service/masking/policies/Remove.java b/api/src/main/java/io/kafbat/ui/service/masking/policies/Remove.java index 90e01133b..af35900be 100644 --- a/api/src/main/java/io/kafbat/ui/service/masking/policies/Remove.java +++ b/api/src/main/java/io/kafbat/ui/service/masking/policies/Remove.java @@ -4,6 +4,7 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ContainerNode; import com.fasterxml.jackson.databind.node.ObjectNode; +import java.util.Map; class Remove extends MaskingPolicy { @@ -25,13 +26,13 @@ public ContainerNode applyToJsonContainer(ContainerNode node) { private JsonNode removeFields(JsonNode node) { if (node.isObject()) { ObjectNode obj = ((ObjectNode) node).objectNode(); - node.fields().forEachRemaining(f -> { - String fieldName = f.getKey(); - JsonNode fieldVal = f.getValue(); + for (Map.Entry property : node.properties()) { + String fieldName = property.getKey(); + JsonNode fieldVal = property.getValue(); if (!fieldShouldBeMasked(fieldName)) { obj.set(fieldName, removeFields(fieldVal)); } - }); + } return obj; } else if (node.isArray()) { var arr = ((ArrayNode) node).arrayNode(node.size()); diff --git a/api/src/main/java/io/kafbat/ui/service/masking/policies/Replace.java b/api/src/main/java/io/kafbat/ui/service/masking/policies/Replace.java index 89bf6304e..cc5ff72c3 100644 --- a/api/src/main/java/io/kafbat/ui/service/masking/policies/Replace.java +++ b/api/src/main/java/io/kafbat/ui/service/masking/policies/Replace.java @@ -6,6 +6,7 @@ import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import com.google.common.base.Preconditions; +import java.util.Map; class Replace extends MaskingPolicy { @@ -31,15 +32,15 @@ public ContainerNode applyToJsonContainer(ContainerNode node) { private JsonNode replaceWithFieldsCheck(JsonNode node) { if (node.isObject()) { ObjectNode obj = ((ObjectNode) node).objectNode(); - node.fields().forEachRemaining(f -> { - String fieldName = f.getKey(); - JsonNode fieldVal = f.getValue(); + for (Map.Entry property : node.properties()) { + String fieldName = property.getKey(); + JsonNode fieldVal = property.getValue(); if (fieldShouldBeMasked(fieldName)) { obj.set(fieldName, replaceRecursive(fieldVal)); } else { obj.set(fieldName, replaceWithFieldsCheck(fieldVal)); } - }); + } return obj; } else if (node.isArray()) { ArrayNode arr = ((ArrayNode) node).arrayNode(node.size()); @@ -53,7 +54,9 @@ private JsonNode replaceWithFieldsCheck(JsonNode node) { private JsonNode replaceRecursive(JsonNode node) { if (node.isObject()) { ObjectNode obj = ((ObjectNode) node).objectNode(); - node.fields().forEachRemaining(f -> obj.set(f.getKey(), replaceRecursive(f.getValue()))); + for (Map.Entry property : node.properties()) { + obj.set(property.getKey(), replaceRecursive(property.getValue())); + } return obj; } else if (node.isArray()) { ArrayNode arr = ((ArrayNode) node).arrayNode(node.size()); diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/JmxMetricsRetriever.java 
b/api/src/main/java/io/kafbat/ui/service/metrics/JmxMetricsRetriever.java deleted file mode 100644 index bcb5665bc..000000000 --- a/api/src/main/java/io/kafbat/ui/service/metrics/JmxMetricsRetriever.java +++ /dev/null @@ -1,134 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import io.kafbat.ui.model.KafkaCluster; -import java.io.Closeable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Consumer; -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanServerConnection; -import javax.management.ObjectName; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; -import javax.management.remote.JMXServiceURL; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.apache.kafka.common.Node; -import org.springframework.stereotype.Service; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; - - -@Service -@Slf4j -class JmxMetricsRetriever implements MetricsRetriever, Closeable { - - private static final boolean SSL_JMX_SUPPORTED; - - static { - // see JmxSslSocketFactory doc for details - SSL_JMX_SUPPORTED = JmxSslSocketFactory.initialized(); - } - - private static final String JMX_URL = "service:jmx:rmi:///jndi/rmi://"; - private static final String JMX_SERVICE_TYPE = "jmxrmi"; - private static final String CANONICAL_NAME_PATTERN = "kafka.server*:*"; - - @Override - public void close() { - JmxSslSocketFactory.clearFactoriesCache(); - } - - @Override - public Flux retrieve(KafkaCluster c, Node node) { - if (isSslJmxEndpoint(c) && !SSL_JMX_SUPPORTED) { - log.warn("Cluster {} has jmx ssl configured, but it is not supported", c.getName()); - return Flux.empty(); - } - return Mono.fromSupplier(() -> retrieveSync(c, node)) - .subscribeOn(Schedulers.boundedElastic()) - .flatMapMany(Flux::fromIterable); - } - - private boolean isSslJmxEndpoint(KafkaCluster cluster) { - return cluster.getMetricsConfig().getKeystoreLocation() != null; - } - - @SneakyThrows - private List retrieveSync(KafkaCluster c, Node node) { - String jmxUrl = JMX_URL + node.host() + ":" + c.getMetricsConfig().getPort() + "/" + JMX_SERVICE_TYPE; - log.debug("Collection JMX metrics for {}", jmxUrl); - List result = new ArrayList<>(); - withJmxConnector(jmxUrl, c, jmxConnector -> getMetricsFromJmx(jmxConnector, result)); - log.debug("{} metrics collected for {}", result.size(), jmxUrl); - return result; - } - - private void withJmxConnector(String jmxUrl, - KafkaCluster c, - Consumer consumer) { - var env = prepareJmxEnvAndSetThreadLocal(c); - try (JMXConnector connector = JMXConnectorFactory.newJMXConnector(new JMXServiceURL(jmxUrl), env)) { - try { - connector.connect(env); - } catch (Exception exception) { - log.error("Error connecting to {}", jmxUrl, exception); - return; - } - consumer.accept(connector); - } catch (Exception e) { - log.error("Error getting jmx metrics from {}", jmxUrl, e); - } finally { - JmxSslSocketFactory.clearThreadLocalContext(); - } - } - - private Map prepareJmxEnvAndSetThreadLocal(KafkaCluster cluster) { - var metricsConfig = cluster.getMetricsConfig(); - Map env = new HashMap<>(); - if (isSslJmxEndpoint(cluster)) { - var clusterSsl = cluster.getOriginalProperties().getSsl(); - JmxSslSocketFactory.setSslContextThreadLocal( - clusterSsl != null ? clusterSsl.getTruststoreLocation() : null, - clusterSsl != null ? 
clusterSsl.getTruststorePassword() : null, - metricsConfig.getKeystoreLocation(), - metricsConfig.getKeystorePassword() - ); - JmxSslSocketFactory.editJmxConnectorEnv(env); - } - - if (StringUtils.isNotEmpty(metricsConfig.getUsername()) - && StringUtils.isNotEmpty(metricsConfig.getPassword())) { - env.put( - JMXConnector.CREDENTIALS, - new String[] {metricsConfig.getUsername(), metricsConfig.getPassword()} - ); - } - return env; - } - - @SneakyThrows - private void getMetricsFromJmx(JMXConnector jmxConnector, List sink) { - MBeanServerConnection msc = jmxConnector.getMBeanServerConnection(); - var jmxMetrics = msc.queryNames(new ObjectName(CANONICAL_NAME_PATTERN), null); - for (ObjectName jmxMetric : jmxMetrics) { - sink.addAll(extractObjectMetrics(jmxMetric, msc)); - } - } - - @SneakyThrows - private List extractObjectMetrics(ObjectName objectName, MBeanServerConnection msc) { - MBeanAttributeInfo[] attrNames = msc.getMBeanInfo(objectName).getAttributes(); - Object[] attrValues = new Object[attrNames.length]; - for (int i = 0; i < attrNames.length; i++) { - attrValues[i] = msc.getAttribute(objectName, attrNames[i].getName()); - } - return JmxMetricsFormatter.constructMetricsList(objectName, attrNames, attrValues); - } - -} - diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/MetricsCollector.java b/api/src/main/java/io/kafbat/ui/service/metrics/MetricsCollector.java deleted file mode 100644 index e9a08e8cb..000000000 --- a/api/src/main/java/io/kafbat/ui/service/metrics/MetricsCollector.java +++ /dev/null @@ -1,68 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import io.kafbat.ui.model.KafkaCluster; -import io.kafbat.ui.model.Metrics; -import io.kafbat.ui.model.MetricsConfig; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.common.Node; -import org.springframework.stereotype.Component; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuple2; -import reactor.util.function.Tuples; - -@Component -@Slf4j -@RequiredArgsConstructor -public class MetricsCollector { - - private final JmxMetricsRetriever jmxMetricsRetriever; - private final PrometheusMetricsRetriever prometheusMetricsRetriever; - - public Mono getBrokerMetrics(KafkaCluster cluster, Collection nodes) { - return Flux.fromIterable(nodes) - .flatMap(n -> getMetrics(cluster, n).map(lst -> Tuples.of(n, lst))) - .collectMap(Tuple2::getT1, Tuple2::getT2) - .map(this::collectMetrics) - .defaultIfEmpty(Metrics.empty()); - } - - private Mono> getMetrics(KafkaCluster kafkaCluster, Node node) { - Flux metricFlux = Flux.empty(); - if (kafkaCluster.getMetricsConfig() != null) { - String type = kafkaCluster.getMetricsConfig().getType(); - if (type == null || type.equalsIgnoreCase(MetricsConfig.JMX_METRICS_TYPE)) { - metricFlux = jmxMetricsRetriever.retrieve(kafkaCluster, node); - } else if (type.equalsIgnoreCase(MetricsConfig.PROMETHEUS_METRICS_TYPE)) { - metricFlux = prometheusMetricsRetriever.retrieve(kafkaCluster, node); - } - } - return metricFlux.collectList(); - } - - public Metrics collectMetrics(Map> perBrokerMetrics) { - Metrics.MetricsBuilder builder = Metrics.builder() - .perBrokerMetrics( - perBrokerMetrics.entrySet() - .stream() - .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue))); - - populateWellknowMetrics(perBrokerMetrics).apply(builder); - - return builder.build(); - } - - private 
WellKnownMetrics populateWellknowMetrics(Map> perBrokerMetrics) { - WellKnownMetrics wellKnownMetrics = new WellKnownMetrics(); - perBrokerMetrics.forEach((node, metrics) -> - metrics.forEach(metric -> - wellKnownMetrics.populate(node, metric))); - return wellKnownMetrics; - } - -} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/MetricsRetriever.java b/api/src/main/java/io/kafbat/ui/service/metrics/MetricsRetriever.java deleted file mode 100644 index aa7d8cc2e..000000000 --- a/api/src/main/java/io/kafbat/ui/service/metrics/MetricsRetriever.java +++ /dev/null @@ -1,9 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import io.kafbat.ui.model.KafkaCluster; -import org.apache.kafka.common.Node; -import reactor.core.publisher.Flux; - -interface MetricsRetriever { - Flux retrieve(KafkaCluster c, Node node); -} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/PrometheusEndpointMetricsParser.java b/api/src/main/java/io/kafbat/ui/service/metrics/PrometheusEndpointMetricsParser.java deleted file mode 100644 index 5662706d4..000000000 --- a/api/src/main/java/io/kafbat/ui/service/metrics/PrometheusEndpointMetricsParser.java +++ /dev/null @@ -1,46 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import java.math.BigDecimal; -import java.util.Arrays; -import java.util.Optional; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.math.NumberUtils; - -@Slf4j -class PrometheusEndpointMetricsParser { - - /** - * Matches openmetrics format. For example, string: - * kafka_server_BrokerTopicMetrics_FiveMinuteRate{name="BytesInPerSec",topic="__consumer_offsets",} 16.94886650744339 - * will produce: - * name=kafka_server_BrokerTopicMetrics_FiveMinuteRate - * value=16.94886650744339 - * labels={name="BytesInPerSec", topic="__consumer_offsets"}", - */ - private static final Pattern PATTERN = Pattern.compile( - "(?^\\w+)([ \t]*\\{*(?.*)}*)[ \\t]+(?[\\d]+\\.?[\\d]+)?"); - - static Optional parse(String s) { - Matcher matcher = PATTERN.matcher(s); - if (matcher.matches()) { - String value = matcher.group("value"); - String metricName = matcher.group("metricName"); - if (metricName == null || !NumberUtils.isCreatable(value)) { - return Optional.empty(); - } - var labels = Arrays.stream(matcher.group("properties").split(",")) - .filter(str -> !"".equals(str)) - .map(str -> str.split("=")) - .filter(spit -> spit.length == 2) - .collect(Collectors.toUnmodifiableMap( - str -> str[0].trim(), - str -> str[1].trim().replace("\"", ""))); - - return Optional.of(RawMetric.create(metricName, labels, new BigDecimal(value))); - } - return Optional.empty(); - } -} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/PrometheusMetricsRetriever.java b/api/src/main/java/io/kafbat/ui/service/metrics/PrometheusMetricsRetriever.java deleted file mode 100644 index fa9da4a95..000000000 --- a/api/src/main/java/io/kafbat/ui/service/metrics/PrometheusMetricsRetriever.java +++ /dev/null @@ -1,70 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; -import io.kafbat.ui.config.ClustersProperties; -import io.kafbat.ui.model.KafkaCluster; -import io.kafbat.ui.model.MetricsConfig; -import io.kafbat.ui.util.WebClientConfigurator; -import java.util.Arrays; -import java.util.Optional; -import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.common.Node; -import org.springframework.stereotype.Service; -import 
org.springframework.util.unit.DataSize; -import org.springframework.web.reactive.function.client.WebClient; -import org.springframework.web.util.UriComponentsBuilder; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - -@Service -@Slf4j -class PrometheusMetricsRetriever implements MetricsRetriever { - - private static final String METRICS_ENDPOINT_PATH = "/metrics"; - private static final int DEFAULT_EXPORTER_PORT = 11001; - - @Override - public Flux retrieve(KafkaCluster c, Node node) { - log.debug("Retrieving metrics from prometheus exporter: {}:{}", node.host(), c.getMetricsConfig().getPort()); - - MetricsConfig metricsConfig = c.getMetricsConfig(); - var webClient = new WebClientConfigurator() - .configureBufferSize(DataSize.ofMegabytes(20)) - .configureBasicAuth(metricsConfig.getUsername(), metricsConfig.getPassword()) - .configureSsl( - c.getOriginalProperties().getSsl(), - new ClustersProperties.KeystoreConfig( - metricsConfig.getKeystoreLocation(), - metricsConfig.getKeystorePassword())) - .build(); - - return retrieve(webClient, node.host(), c.getMetricsConfig()); - } - - @VisibleForTesting - Flux retrieve(WebClient webClient, String host, MetricsConfig metricsConfig) { - int port = Optional.ofNullable(metricsConfig.getPort()).orElse(DEFAULT_EXPORTER_PORT); - boolean sslEnabled = metricsConfig.isSsl() || metricsConfig.getKeystoreLocation() != null; - var request = webClient.get() - .uri(UriComponentsBuilder.newInstance() - .scheme(sslEnabled ? "https" : "http") - .host(host) - .port(port) - .path(METRICS_ENDPOINT_PATH).build().toUri()); - - WebClient.ResponseSpec responseSpec = request.retrieve(); - return responseSpec.bodyToMono(String.class) - .doOnError(e -> log.error("Error while getting metrics from {}", host, e)) - .onErrorResume(th -> Mono.empty()) - .flatMapMany(body -> - Flux.fromStream( - Arrays.stream(body.split("\\n")) - .filter(str -> !Strings.isNullOrEmpty(str) && !str.startsWith("#")) // skipping comments strings - .map(PrometheusEndpointMetricsParser::parse) - .filter(Optional::isPresent) - .map(Optional::get) - ) - ); - } -} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/RawMetric.java b/api/src/main/java/io/kafbat/ui/service/metrics/RawMetric.java index 80cc6073b..3b306872c 100644 --- a/api/src/main/java/io/kafbat/ui/service/metrics/RawMetric.java +++ b/api/src/main/java/io/kafbat/ui/service/metrics/RawMetric.java @@ -1,10 +1,13 @@ package io.kafbat.ui.service.metrics; +import io.prometheus.metrics.core.metrics.Gauge; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collection; +import java.util.LinkedHashMap; import java.util.Map; -import lombok.AllArgsConstructor; -import lombok.EqualsAndHashCode; -import lombok.ToString; +import java.util.stream.Stream; public interface RawMetric { @@ -14,47 +17,31 @@ public interface RawMetric { BigDecimal value(); - // Key, that can be used for metrics reductions - default Object identityKey() { - return name() + "_" + labels(); - } - - RawMetric copyWithValue(BigDecimal newValue); - //-------------------------------------------------- static RawMetric create(String name, Map labels, BigDecimal value) { return new SimpleMetric(name, labels, value); } - @AllArgsConstructor - @EqualsAndHashCode - @ToString - class SimpleMetric implements RawMetric { - - private final String name; - private final Map labels; - private final BigDecimal value; - - @Override - public String name() { - return name; - } - - 
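The new groupIntoSnapshot helper in RawMetric folds raw samples that share a metric name into a single gauge snapshot, one data point per distinct label set. Assumed usage (the generics on the helper's signature are inferred from its body in this diff):

    import io.kafbat.ui.service.metrics.RawMetric;
    import io.prometheus.metrics.model.snapshots.MetricSnapshot;
    import java.math.BigDecimal;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Stream;

    class GroupIntoSnapshotDemo {
      public static void main(String[] args) {
        List<RawMetric> raw = List.of(
            RawMetric.create("kafka_server_broker_state", Map.of("broker", "0"), BigDecimal.ONE),
            RawMetric.create("kafka_server_broker_state", Map.of("broker", "1"), BigDecimal.ONE));
        // Both samples share a name, so they collapse into one snapshot with two points:
        Stream<MetricSnapshot> snapshots = RawMetric.groupIntoSnapshot(raw);
        System.out.println(snapshots.count());   // 1
      }
    }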
@Override - public Map labels() { - return labels; - } - - @Override - public BigDecimal value() { - return value; + static Stream groupIntoSnapshot(Collection rawMetrics) { + Map map = new LinkedHashMap<>(); + for (RawMetric m : rawMetrics) { + var lbls = m.labels().keySet().toArray(String[]::new); + var lblVals = Arrays.stream(lbls).map(l -> m.labels().get(l)).toArray(String[]::new); + var gauge = map.computeIfAbsent( + m.name(), + n -> Gauge.builder() + .name(m.name()) + .help(m.name()) + .labelNames(lbls) + .build() + ); + gauge.labelValues(lblVals).set(m.value().doubleValue()); } + return map.values().stream().map(Gauge::collect); + } - @Override - public RawMetric copyWithValue(BigDecimal newValue) { - return new SimpleMetric(name, labels, newValue); - } + record SimpleMetric(String name, Map labels, BigDecimal value) implements RawMetric { } } diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/SummarizedMetrics.java b/api/src/main/java/io/kafbat/ui/service/metrics/SummarizedMetrics.java new file mode 100644 index 000000000..feadbfdce --- /dev/null +++ b/api/src/main/java/io/kafbat/ui/service/metrics/SummarizedMetrics.java @@ -0,0 +1,125 @@ +package io.kafbat.ui.service.metrics; + +import static io.prometheus.metrics.model.snapshots.CounterSnapshot.CounterDataPointSnapshot; +import static io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot; +import static java.util.stream.Collectors.toMap; + +import com.google.common.collect.Streams; +import io.kafbat.ui.model.Metrics; +import io.prometheus.metrics.model.snapshots.CounterSnapshot; +import io.prometheus.metrics.model.snapshots.DataPointSnapshot; +import io.prometheus.metrics.model.snapshots.GaugeSnapshot; +import io.prometheus.metrics.model.snapshots.Labels; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; +import io.prometheus.metrics.model.snapshots.UnknownSnapshot; +import io.prometheus.metrics.model.snapshots.UnknownSnapshot.UnknownDataPointSnapshot; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.Optional; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Stream; +import lombok.RequiredArgsConstructor; + +/** + * Will be replaced in the next versions. 
+ * @deprecated Since 1.4.0 + **/ +@Deprecated(forRemoval = true, since = "1.4.0") //used for api backward-compatibility +@RequiredArgsConstructor +public class SummarizedMetrics { + + private final Metrics metrics; + + public Stream asStream() { + return Streams.concat( + metrics.getInferredMetrics().asStream(), + summarize( + metrics.getPerBrokerScrapedMetrics() + .values() + .stream() + .flatMap(Collection::stream) + ) + ); + } + + private Stream summarize(Stream snapshots) { + return snapshots + .collect( + toMap( + mfs -> mfs.getMetadata().getName(), + Optional::of, SummarizedMetrics::summarizeMetricSnapshot, LinkedHashMap::new + ) + ).values() + .stream() + .filter(Optional::isPresent) + .map(Optional::get); + } + + //returns Optional.empty if merging not supported for metric type + @SuppressWarnings("unchecked") + private static Optional summarizeMetricSnapshot(Optional snap1Opt, + Optional snap2Opt) { + + if ((snap1Opt.isEmpty() || snap2Opt.isEmpty()) || !(snap1Opt.get().getClass().equals(snap2Opt.get().getClass()))) { + return Optional.empty(); + } + + var snap1 = snap1Opt.get(); + + if (snap1 instanceof GaugeSnapshot + || snap1 instanceof CounterSnapshot + || snap1 instanceof UnknownSnapshot) { + + BiFunction pointFactory; + Function valueGetter; + Function, MetricSnapshot> builder; + + if (snap1 instanceof UnknownSnapshot) { + pointFactory = (l, v) -> UnknownDataPointSnapshot.builder() + .labels(l) + .value(v) + .build(); + valueGetter = (dp) -> ((UnknownDataPointSnapshot) dp).getValue(); + builder = (dps) -> + new UnknownSnapshot(snap1.getMetadata(), (Collection) dps); + } else if (snap1 instanceof CounterSnapshot) { + pointFactory = (l, v) -> CounterDataPointSnapshot.builder() + .labels(l) + .value(v) + .build(); + valueGetter = (dp) -> ((CounterDataPointSnapshot) dp).getValue(); + builder = (dps) -> + new CounterSnapshot(snap1.getMetadata(), (Collection) dps); + } else { + pointFactory = (l, v) -> GaugeDataPointSnapshot.builder() + .labels(l) + .value(v) + .build(); + valueGetter = (dp) -> ((GaugeDataPointSnapshot) dp).getValue(); + builder = (dps) -> + new GaugeSnapshot(snap1.getMetadata(), (Collection) dps); + } + + Collection points = + Stream.concat(snap1.getDataPoints().stream(), snap2Opt.get().getDataPoints().stream()) + .collect( + toMap( + // merging samples with same labels + DataPointSnapshot::getLabels, + s -> s, + (s1, s2) -> pointFactory.apply( + s1.getLabels(), + valueGetter.apply(s1) + valueGetter.apply(s2) + ), + LinkedHashMap::new + ) + ).values(); + return Optional.of(builder.apply(points)); + } else { + return Optional.empty(); + } + } + + +} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/WellKnownMetrics.java b/api/src/main/java/io/kafbat/ui/service/metrics/WellKnownMetrics.java deleted file mode 100644 index 80e5f023c..000000000 --- a/api/src/main/java/io/kafbat/ui/service/metrics/WellKnownMetrics.java +++ /dev/null @@ -1,70 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import static org.apache.commons.lang3.StringUtils.containsIgnoreCase; -import static org.apache.commons.lang3.StringUtils.endsWithIgnoreCase; - -import io.kafbat.ui.model.Metrics; -import java.math.BigDecimal; -import java.util.HashMap; -import java.util.Map; -import org.apache.kafka.common.Node; - -class WellKnownMetrics { - - private static final String BROKER_TOPIC_METRICS = "BrokerTopicMetrics"; - private static final String FIFTEEN_MINUTE_RATE = "FifteenMinuteRate"; - - // per broker - final Map brokerBytesInFifteenMinuteRate = new HashMap<>(); - final Map 
brokerBytesOutFifteenMinuteRate = new HashMap<>(); - - // per topic - final Map bytesInFifteenMinuteRate = new HashMap<>(); - final Map bytesOutFifteenMinuteRate = new HashMap<>(); - - void populate(Node node, RawMetric rawMetric) { - updateBrokerIOrates(node, rawMetric); - updateTopicsIOrates(rawMetric); - } - - void apply(Metrics.MetricsBuilder metricsBuilder) { - metricsBuilder.topicBytesInPerSec(bytesInFifteenMinuteRate); - metricsBuilder.topicBytesOutPerSec(bytesOutFifteenMinuteRate); - metricsBuilder.brokerBytesInPerSec(brokerBytesInFifteenMinuteRate); - metricsBuilder.brokerBytesOutPerSec(brokerBytesOutFifteenMinuteRate); - } - - private void updateBrokerIOrates(Node node, RawMetric rawMetric) { - String name = rawMetric.name(); - if (!brokerBytesInFifteenMinuteRate.containsKey(node.id()) - && rawMetric.labels().size() == 1 - && "BytesInPerSec".equalsIgnoreCase(rawMetric.labels().get("name")) - && containsIgnoreCase(name, BROKER_TOPIC_METRICS) - && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) { - brokerBytesInFifteenMinuteRate.put(node.id(), rawMetric.value()); - } - if (!brokerBytesOutFifteenMinuteRate.containsKey(node.id()) - && rawMetric.labels().size() == 1 - && "BytesOutPerSec".equalsIgnoreCase(rawMetric.labels().get("name")) - && containsIgnoreCase(name, BROKER_TOPIC_METRICS) - && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) { - brokerBytesOutFifteenMinuteRate.put(node.id(), rawMetric.value()); - } - } - - private void updateTopicsIOrates(RawMetric rawMetric) { - String name = rawMetric.name(); - String topic = rawMetric.labels().get("topic"); - if (topic != null - && containsIgnoreCase(name, BROKER_TOPIC_METRICS) - && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) { - String nameProperty = rawMetric.labels().get("name"); - if ("BytesInPerSec".equalsIgnoreCase(nameProperty)) { - bytesInFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value())); - } else if ("BytesOutPerSec".equalsIgnoreCase(nameProperty)) { - bytesOutFifteenMinuteRate.compute(topic, (k, v) -> v == null ? 
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/prometheus/PrometheusMetricsExposer.java b/api/src/main/java/io/kafbat/ui/service/metrics/prometheus/PrometheusMetricsExposer.java
new file mode 100644
index 000000000..a90e65247
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/prometheus/PrometheusMetricsExposer.java
@@ -0,0 +1,80 @@
+package io.kafbat.ui.service.metrics.prometheus;
+
+import static io.kafbat.ui.util.MetricsUtils.appendLabel;
+import static org.springframework.http.HttpHeaders.CONTENT_TYPE;
+
+import com.google.common.annotations.VisibleForTesting;
+import io.kafbat.ui.model.Metrics;
+import io.kafbat.ui.util.MetricsUtils;
+import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import io.prometheus.metrics.model.snapshots.MetricSnapshots;
+import java.io.ByteArrayOutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import lombok.SneakyThrows;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.ResponseEntity;
+
+public final class PrometheusMetricsExposer {
+
+  private static final String CLUSTER_EXPOSE_LBL_NAME = "cluster";
+  private static final String BROKER_EXPOSE_LBL_NAME = "broker_id";
+
+  private static final HttpHeaders PROMETHEUS_EXPOSE_ENDPOINT_HEADERS;
+
+  static {
+    PROMETHEUS_EXPOSE_ENDPOINT_HEADERS = new HttpHeaders();
+    // the body is written with PrometheusTextFormatWriter, so the content type must match it
+    PROMETHEUS_EXPOSE_ENDPOINT_HEADERS.set(CONTENT_TYPE, PrometheusTextFormatWriter.CONTENT_TYPE);
+  }
+
+  private PrometheusMetricsExposer() {
+  }
+
+  public static ResponseEntity<String> exposeAllMetrics(Map<String, Metrics> clustersMetrics) {
+    return constructHttpsResponse(getMetricsForGlobalExpose(clustersMetrics));
+  }
+
+  private static MetricSnapshots getMetricsForGlobalExpose(Map<String, Metrics> clustersMetrics) {
+    return new MetricSnapshots(clustersMetrics.entrySet()
+        .stream()
+        .flatMap(e -> prepareMetricsForGlobalExpose(e.getKey(), e.getValue()))
+        // merge snapshots with the same name, using a LinkedHashMap to keep ordering
+        .collect(Collectors.toMap(mfs -> mfs.getMetadata().getName(), mfs -> mfs,
+            MetricsUtils::concatDataPoints, LinkedHashMap::new))
+        .values());
+  }
+
+  public static Stream<MetricSnapshot> prepareMetricsForGlobalExpose(String clusterName, Metrics metrics) {
+    return Stream.concat(
+            metrics.getInferredMetrics().asStream(),
+            extractBrokerMetricsWithLabel(metrics)
+        )
+        .map(mfs -> appendLabel(mfs, CLUSTER_EXPOSE_LBL_NAME, clusterName));
+  }
+
+  private static Stream<MetricSnapshot> extractBrokerMetricsWithLabel(Metrics metrics) {
+    return metrics.getPerBrokerScrapedMetrics().entrySet().stream()
+        .flatMap(e -> {
+          String brokerId = String.valueOf(e.getKey());
+          return e.getValue().stream().map(mfs -> appendLabel(mfs, BROKER_EXPOSE_LBL_NAME, brokerId));
+        });
+  }
+
+  @VisibleForTesting
+  @SneakyThrows
+  public static ResponseEntity<String> constructHttpsResponse(MetricSnapshots metrics) {
+    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+    PrometheusTextFormatWriter writer = new PrometheusTextFormatWriter(false);
+    writer.write(buffer, metrics);
+    return ResponseEntity
+        .ok()
+        .headers(PROMETHEUS_EXPOSE_ENDPOINT_HEADERS)
+        .body(buffer.toString(StandardCharsets.UTF_8));
+  }
+}
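A short usage sketch of the expose path (cluster name and sample values are illustrative):

    ResponseEntity<String> response =
        PrometheusMetricsExposer.exposeAllMetrics(Map.of("local", clusterMetrics));
    // the body is Prometheus text format, each sample tagged with its origin, e.g.:
    //   broker_bytes_disk{broker_id="1",cluster="local"} 8.53E8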
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/BrokerMetricsScraper.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/BrokerMetricsScraper.java
new file mode 100644
index 000000000..aa28e22a2
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/BrokerMetricsScraper.java
@@ -0,0 +1,11 @@
+package io.kafbat.ui.service.metrics.scrape;
+
+import java.util.Collection;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Mono;
+
+public interface BrokerMetricsScraper {
+
+  Mono<PerBrokerScrapedMetrics> scrape(Collection<Node> clusterNodes);
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/IoRatesMetricsScanner.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/IoRatesMetricsScanner.java
new file mode 100644
index 000000000..cc912f978
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/IoRatesMetricsScanner.java
@@ -0,0 +1,94 @@
+package io.kafbat.ui.service.metrics.scrape;
+
+import static org.apache.commons.lang3.Strings.CI;
+
+import io.kafbat.ui.model.Metrics;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot;
+import io.prometheus.metrics.model.snapshots.Labels;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import io.prometheus.metrics.model.snapshots.UnknownSnapshot;
+import java.math.BigDecimal;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+// Scans scraped JMX/Prometheus metrics and tries to infer IO rates
+class IoRatesMetricsScanner {
+
+  public static final String BROKER_TOPIC_METRICS_SUFFIX = "BrokerTopicMetrics";
+  public static final String FIFTEEN_MINUTE_RATE_SUFFIX = "FifteenMinuteRate";
+
+  // per broker
+  final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
+  final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();
+
+  // per topic
+  final Map<String, BigDecimal> bytesInFifteenMinuteRate = new HashMap<>();
+  final Map<String, BigDecimal> bytesOutFifteenMinuteRate = new HashMap<>();
+
+  IoRatesMetricsScanner(Map<Integer, List<MetricSnapshot>> perBrokerMetrics) {
+    for (Map.Entry<Integer, List<MetricSnapshot>> broker : perBrokerMetrics.entrySet()) {
+      Integer nodeId = broker.getKey();
+      List<MetricSnapshot> metrics = broker.getValue();
+      for (MetricSnapshot metric : metrics) {
+        String name = metric.getMetadata().getName();
+        if (metric instanceof GaugeSnapshot gauge) {
+          gauge.getDataPoints().forEach(dp -> {
+            updateBrokerIOrates(nodeId, name, dp.getLabels(), dp.getValue());
+            updateTopicsIOrates(name, dp.getLabels(), dp.getValue());
+          });
+        } else if (metric instanceof UnknownSnapshot unknown) {
+          unknown.getDataPoints().forEach(dp -> {
+            updateBrokerIOrates(nodeId, name, dp.getLabels(), dp.getValue());
+            updateTopicsIOrates(name, dp.getLabels(), dp.getValue());
+          });
+        }
+      }
+    }
+  }
+
+  Metrics.IoRates get() {
+    return Metrics.IoRates.builder()
+        .topicBytesInPerSec(bytesInFifteenMinuteRate)
+        .topicBytesOutPerSec(bytesOutFifteenMinuteRate)
+        .brokerBytesInPerSec(brokerBytesInFifteenMinuteRate)
+        .brokerBytesOutPerSec(brokerBytesOutFifteenMinuteRate)
+        .build();
+  }
+
+  private void updateBrokerIOrates(int nodeId, String name, Labels labels, double value) {
+    if (!brokerBytesInFifteenMinuteRate.containsKey(nodeId)
+        && labels.size() == 1
+        && "BytesInPerSec".equalsIgnoreCase(labels.getValue(0))
+        && CI.contains(name, BROKER_TOPIC_METRICS_SUFFIX)
+        && CI.endsWith(name, FIFTEEN_MINUTE_RATE_SUFFIX)) {
+      brokerBytesInFifteenMinuteRate.put(nodeId, BigDecimal.valueOf(value));
+    }
+    if (!brokerBytesOutFifteenMinuteRate.containsKey(nodeId)
+        && labels.size() == 1
+        && "BytesOutPerSec".equalsIgnoreCase(labels.getValue(0))
+        && CI.contains(name, BROKER_TOPIC_METRICS_SUFFIX)
+        && CI.endsWith(name, FIFTEEN_MINUTE_RATE_SUFFIX)) {
+      brokerBytesOutFifteenMinuteRate.put(nodeId, BigDecimal.valueOf(value));
+    }
+  }
+
+  private void updateTopicsIOrates(String name, Labels labels, double value) {
+    if (labels.contains("topic")
+        && CI.contains(name, BROKER_TOPIC_METRICS_SUFFIX)
+        && CI.endsWith(name, FIFTEEN_MINUTE_RATE_SUFFIX)) {
+      String topic = labels.get("topic");
+      if (labels.contains("name")) {
+        var nameLblVal = labels.get("name");
+        if ("BytesInPerSec".equalsIgnoreCase(nameLblVal)) {
+          BigDecimal val = BigDecimal.valueOf(value);
+          bytesInFifteenMinuteRate.merge(topic, val, BigDecimal::add);
+        } else if ("BytesOutPerSec".equalsIgnoreCase(nameLblVal)) {
+          BigDecimal val = BigDecimal.valueOf(value);
+          bytesOutFifteenMinuteRate.merge(topic, val, BigDecimal::add);
+        }
+      }
+    }
+  }
+
+}
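For reference, data points the scanner would pick up look roughly like this (names follow JMX-exporter conventions and are illustrative, not guaranteed):

    // broker-level match: exactly one label, and that label's value is BytesInPerSec
    //   kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name="BytesInPerSec"} 2048.0
    // topic-level match: carries a topic label; values are summed per topic
    //   kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name="BytesInPerSec",topic="orders"} 1024.5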
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/MetricsScraper.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/MetricsScraper.java
new file mode 100644
index 000000000..8c5ff3bba
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/MetricsScraper.java
@@ -0,0 +1,91 @@
+package io.kafbat.ui.service.metrics.scrape;
+
+import static io.kafbat.ui.config.ClustersProperties.Cluster;
+import static io.kafbat.ui.model.MetricsScrapeProperties.JMX_METRICS_TYPE;
+import static io.kafbat.ui.model.MetricsScrapeProperties.PROMETHEUS_METRICS_TYPE;
+
+import io.kafbat.ui.config.ClustersProperties.MetricsConfig;
+import io.kafbat.ui.model.Metrics;
+import io.kafbat.ui.model.MetricsScrapeProperties;
+import io.kafbat.ui.service.metrics.prometheus.PrometheusMetricsExposer;
+import io.kafbat.ui.service.metrics.scrape.inferred.InferredMetrics;
+import io.kafbat.ui.service.metrics.scrape.inferred.InferredMetricsScraper;
+import io.kafbat.ui.service.metrics.scrape.jmx.JmxMetricsRetriever;
+import io.kafbat.ui.service.metrics.scrape.jmx.JmxMetricsScraper;
+import io.kafbat.ui.service.metrics.scrape.prometheus.PrometheusScraper;
+import io.kafbat.ui.service.metrics.sink.MetricsSink;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import jakarta.annotation.Nullable;
+import java.util.Collection;
+import lombok.AccessLevel;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+@Slf4j
+@RequiredArgsConstructor(access = AccessLevel.PRIVATE)
+public class MetricsScraper {
+
+  private final String clusterName;
+  private final MetricsSink sink;
+  private final InferredMetricsScraper inferredMetricsScraper;
+  @Nullable
+  private final BrokerMetricsScraper brokerMetricsScraper;
+
+  public static MetricsScraper create(Cluster cluster,
+                                      JmxMetricsRetriever jmxMetricsRetriever) {
+    BrokerMetricsScraper scraper = null;
+    MetricsConfig metricsConfig = cluster.getMetrics();
+    if (metricsConfig != null) {
+      var scrapeProperties = MetricsScrapeProperties.create(cluster);
+      if (metricsConfig.getType().equalsIgnoreCase(JMX_METRICS_TYPE) && metricsConfig.getPort() != null) {
+        scraper = new JmxMetricsScraper(scrapeProperties, jmxMetricsRetriever);
+      } else if (metricsConfig.getType().equalsIgnoreCase(PROMETHEUS_METRICS_TYPE)) {
+        scraper = new PrometheusScraper(scrapeProperties);
+      }
+    }
+    return new MetricsScraper(
+        cluster.getName(),
+        MetricsSink.create(cluster),
+        new InferredMetricsScraper(),
+        scraper
+    );
+  }
+
+  public Mono<Metrics> scrape(ScrapedClusterState clusterState, Collection<Node> nodes) {
+    Mono<InferredMetrics> inferred = inferredMetricsScraper.scrape(clusterState);
+    Mono<PerBrokerScrapedMetrics> brokerMetrics = scrapeBrokers(nodes);
+    return inferred.zipWith(
+        brokerMetrics,
+        (inf, ext) ->
+            Metrics.builder()
+                .inferredMetrics(inf)
+                .ioRates(ext.ioRates())
+                .perBrokerScrapedMetrics(ext.perBrokerMetrics())
+                .build()
+    ).doOnNext(this::sendMetricsToSink);
+  }
+
+  private void sendMetricsToSink(Metrics metrics) {
+    sink.send(prepareMetricsForSending(metrics))
+        .doOnError(th -> log.warn("Error sending metrics to metrics sink", th))
+        .subscribe();
+  }
+
+  private Flux<MetricSnapshot> prepareMetricsForSending(Metrics metrics) {
+    // needs to be "cold", because sinks can resubscribe multiple times
+    return Flux.defer(() ->
+        Flux.fromStream(
+            PrometheusMetricsExposer.prepareMetricsForGlobalExpose(clusterName, metrics)));
+  }
+
+  private Mono<PerBrokerScrapedMetrics> scrapeBrokers(Collection<Node> nodes) {
+    if (brokerMetricsScraper != null) {
+      return brokerMetricsScraper.scrape(nodes);
+    }
+    return Mono.just(PerBrokerScrapedMetrics.empty());
+  }
+
+}
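A minimal wiring sketch, assuming a configured cluster and the Spring-managed JMX retriever (variable names are illustrative):

    MetricsScraper scraper = MetricsScraper.create(cluster, jmxMetricsRetriever);
    Mono<Metrics> metrics = scraper.scrape(ScrapedClusterState.empty(), clusterNodes);
    // side effect: every successful scrape is also forwarded to the configured MetricsSink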
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/PerBrokerScrapedMetrics.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/PerBrokerScrapedMetrics.java
new file mode 100644
index 000000000..7bd733a65
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/PerBrokerScrapedMetrics.java
@@ -0,0 +1,18 @@
+package io.kafbat.ui.service.metrics.scrape;
+
+import io.kafbat.ui.model.Metrics;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.List;
+import java.util.Map;
+
+public record PerBrokerScrapedMetrics(Map<Integer, List<MetricSnapshot>> perBrokerMetrics) {
+
+  static PerBrokerScrapedMetrics empty() {
+    return new PerBrokerScrapedMetrics(Map.of());
+  }
+
+  Metrics.IoRates ioRates() {
+    return new IoRatesMetricsScanner(perBrokerMetrics).get();
+  }
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/ScrapedClusterState.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/ScrapedClusterState.java
new file mode 100644
index 000000000..e5d8c059c
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/ScrapedClusterState.java
@@ -0,0 +1,199 @@
+package io.kafbat.ui.service.metrics.scrape;
+
+import static io.kafbat.ui.model.InternalLogDirStats.LogDirSpaceStats;
+import static io.kafbat.ui.model.InternalLogDirStats.SegmentStats;
+import static io.kafbat.ui.service.ReactiveAdminClient.ClusterDescription;
+
+import com.google.common.collect.Table;
+import io.kafbat.ui.model.InternalLogDirStats;
+import io.kafbat.ui.model.InternalPartitionsOffsets;
+import io.kafbat.ui.service.ReactiveAdminClient;
+import jakarta.annotation.Nullable;
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import lombok.Builder;
+import lombok.RequiredArgsConstructor;
+import lombok.Value;
+import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.ConsumerGroupListing;
+import org.apache.kafka.clients.admin.OffsetSpec;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import reactor.core.publisher.Mono;
+
+@Builder(toBuilder = true)
+@RequiredArgsConstructor
+@Value
+public class ScrapedClusterState {
+
+  Instant scrapeFinishedAt;
+  Map<Integer, NodeState> nodesStates;
+  Map<String, TopicState> topicStates;
+  Map<String, ConsumerGroupState> consumerGroupsStates;
+
+  public record NodeState(int id,
+                          Node node,
+                          @Nullable SegmentStats segmentStats,
+                          @Nullable LogDirSpaceStats logDirSpaceStats) {
+  }
+
+  public record TopicState(
+      String name,
+      TopicDescription description,
+      List<ConfigEntry> configs,
+      Map<Integer, Long> startOffsets,
+      Map<Integer, Long> endOffsets,
+      @Nullable SegmentStats segmentStats,
+      @Nullable Map<Integer, SegmentStats> partitionsSegmentStats) {
+  }
+
+  public record ConsumerGroupState(
+      String group,
+      ConsumerGroupDescription description,
+      Map<TopicPartition, Long> committedOffsets) {
+  }
+
+  public static ScrapedClusterState empty() {
+    return ScrapedClusterState.builder()
+        .scrapeFinishedAt(Instant.now())
+        .nodesStates(Map.of())
+        .topicStates(Map.of())
+        .consumerGroupsStates(Map.of())
+        .build();
+  }
+
+  public ScrapedClusterState updateTopics(Map<String, TopicDescription> descriptions,
+                                          Map<String, List<ConfigEntry>> configs,
+                                          InternalPartitionsOffsets partitionsOffsets) {
+    var updatedTopicStates = new HashMap<>(topicStates);
+    descriptions.forEach((topic, description) -> {
+      SegmentStats segmentStats = null;
+      Map<Integer, SegmentStats> partitionsSegmentStats = null;
+      if (topicStates.containsKey(topic)) {
+        segmentStats = topicStates.get(topic).segmentStats();
+        partitionsSegmentStats = topicStates.get(topic).partitionsSegmentStats();
+      }
+      updatedTopicStates.put(
+          topic,
+          new TopicState(
+              topic,
+              description,
+              configs.getOrDefault(topic, List.of()),
+              partitionsOffsets.topicOffsets(topic, true),
+              partitionsOffsets.topicOffsets(topic, false),
+              segmentStats,
+              partitionsSegmentStats
+          )
+      );
+    });
+    return toBuilder()
+        .topicStates(updatedTopicStates)
+        .build();
+  }
+
+  public ScrapedClusterState topicDeleted(String topic) {
+    var newTopicStates = new HashMap<>(topicStates);
+    newTopicStates.remove(topic);
+    return toBuilder()
+        .topicStates(newTopicStates)
+        .build();
+  }
+
+  public static Mono<ScrapedClusterState> scrape(ClusterDescription clusterDescription,
+                                                 ReactiveAdminClient ac) {
+    return Mono.zip(
+        ac.describeLogDirs(clusterDescription.getNodes().stream().map(Node::id).toList())
+            .map(InternalLogDirStats::new),
+        ac.listConsumerGroups().map(l -> l.stream().map(ConsumerGroupListing::groupId).toList()),
+        ac.describeTopics(),
+        ac.getTopicsConfig()
+    ).flatMap(phase1 ->
+        Mono.zip(
+            ac.listOffsets(phase1.getT3().values(), OffsetSpec.latest()),
+            ac.listOffsets(phase1.getT3().values(), OffsetSpec.earliest()),
+            ac.describeConsumerGroups(phase1.getT2()),
+            ac.listConsumerGroupOffsets(phase1.getT2(), null)
+        ).map(phase2 ->
+            create(
+                clusterDescription,
+                phase1.getT1(),
+                topicStateMap(phase1.getT1(), phase1.getT3(), phase1.getT4(), phase2.getT1(), phase2.getT2()),
+                phase2.getT3(),
+                phase2.getT4()
+            )));
+  }
+
+  private static Map<String, TopicState> topicStateMap(
+      InternalLogDirStats segmentStats,
+      Map<String, TopicDescription> topicDescriptions,
+      Map<String, List<ConfigEntry>> topicConfigs,
+      Map<TopicPartition, Long> latestOffsets,
+      Map<TopicPartition, Long> earliestOffsets) {
+
+    return topicDescriptions.entrySet().stream().map(entry -> new TopicState(
+        entry.getKey(),
+        entry.getValue(),
+        topicConfigs.getOrDefault(entry.getKey(), List.of()),
+        filterTopic(entry.getKey(), earliestOffsets),
+        filterTopic(entry.getKey(), latestOffsets),
+        segmentStats.getTopicStats().get(entry.getKey()),
+        Optional.ofNullable(segmentStats.getPartitionsStats())
+            .map(partitionsStats -> filterTopic(entry.getKey(), partitionsStats))
+            .orElse(null)
+    )).collect(Collectors.toMap(
+        TopicState::name,
+        Function.identity()
+    ));
+  }
+
+  private static ScrapedClusterState create(ClusterDescription clusterDescription,
+                                            InternalLogDirStats segmentStats,
+                                            Map<String, TopicState> topicStates,
+                                            Map<String, ConsumerGroupDescription> consumerDescriptions,
+                                            Table<String, TopicPartition, Long> consumerOffsets) {
+
+    Map<String, ConsumerGroupState> consumerGroupsStates = new HashMap<>();
+    consumerDescriptions.forEach((name, desc) ->
+        consumerGroupsStates.put(
+            name,
+            new ConsumerGroupState(
+                name,
+                desc,
+                consumerOffsets.row(name)
+            )));
+
+    Map<Integer, NodeState> nodesStates = new HashMap<>();
+    clusterDescription.getNodes().forEach(node ->
+        nodesStates.put(
+            node.id(),
+            new NodeState(
+                node.id(),
+                node,
+                segmentStats.getBrokerStats().get(node.id()),
+                segmentStats.getBrokerDirsStats().get(node.id())
+            )));
+
+    return new ScrapedClusterState(
+        Instant.now(),
+        nodesStates,
+        topicStates,
+        consumerGroupsStates
+    );
+  }
+
+  // generic over the value type: used for both partition offsets and partition segment stats
+  private static <T> Map<Integer, T> filterTopic(String topicForFilter, Map<TopicPartition, T> tpMap) {
+    return tpMap.entrySet()
+        .stream()
+        .filter(tp -> tp.getKey().topic().equals(topicForFilter))
+        .collect(Collectors.toMap(e -> e.getKey().partition(), Map.Entry::getValue));
+  }
+
+}
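The state object is deliberately immutable; callers apply deltas by producing a new instance instead of mutating the old one. A small sketch, assuming current holds the last scraped state:

    ScrapedClusterState next = current.topicDeleted("orders");
    // or, after a metadata refresh:
    // ScrapedClusterState next = current.updateTopics(descriptions, configs, partitionsOffsets);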
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetrics.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetrics.java
new file mode 100644
index 000000000..6d68ff1b4
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetrics.java
@@ -0,0 +1,24 @@
+package io.kafbat.ui.service.metrics.scrape.inferred;
+
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.List;
+import java.util.stream.Stream;
+
+// metrics inferred from cluster state (always present for any setup)
+public class InferredMetrics {
+
+  private final List<MetricSnapshot> metrics;
+
+  public static InferredMetrics empty() {
+    return new InferredMetrics(List.of());
+  }
+
+  public InferredMetrics(List<MetricSnapshot> metrics) {
+    this.metrics = metrics;
+  }
+
+  public Stream<MetricSnapshot> asStream() {
+    return metrics.stream();
+  }
+
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetricsScraper.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetricsScraper.java
new file mode 100644
index 000000000..0bdba0a6a
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetricsScraper.java
@@ -0,0 +1,235 @@
+package io.kafbat.ui.service.metrics.scrape.inferred;
+
+import com.google.common.annotations.VisibleForTesting;
+import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState;
+import io.prometheus.metrics.core.metrics.Gauge;
+import io.prometheus.metrics.core.metrics.Metric;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot;
+import io.prometheus.metrics.model.snapshots.Labels;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import io.prometheus.metrics.model.snapshots.PrometheusNaming;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.admin.MemberDescription;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Mono;
+
+@Slf4j
+@RequiredArgsConstructor
+public class InferredMetricsScraper {
+
+  public static final String NODE_ID_TAG = "node_id";
+  public static final String TOPIC_TAG = "topic";
+  public static final String GROUP_TAG = "group";
+  public static final String PARTITION_TAG = "partition";
+
+  private ScrapedClusterState prevState = null;
+
+  public synchronized Mono<InferredMetrics> scrape(ScrapedClusterState newState) {
+    var inferred = infer(prevState, newState);
+    this.prevState = newState;
+    return Mono.just(inferred);
+  }
+
+  @VisibleForTesting
+  static InferredMetrics infer(@Nullable ScrapedClusterState prevState, ScrapedClusterState newState) {
+    var registry = new MetricsRegistry();
+    fillNodesMetrics(registry, newState);
+    fillTopicMetrics(registry, newState);
+    fillConsumerGroupsMetrics(registry, newState);
+    List<MetricSnapshot> metrics = registry.metrics.values().stream().map(Metric::collect).toList();
+    log.debug("{} metric families inferred from cluster state", metrics.size());
+    return new InferredMetrics(metrics);
+  }
+
+  private static class MetricsRegistry {
+
+    final Map<String, Metric> metrics = new LinkedHashMap<>();
+
+    void gauge(String name,
+               String help,
+               List<String> lbls,
+               List<String> lblVals,
+               Number value) {
+
+      Gauge gauge = (Gauge) metrics.computeIfAbsent(name, (n) ->
+          Gauge.builder()
+              .name(PrometheusNaming.sanitizeMetricName(name))
+              .labelNames(lbls.toArray(new String[0]))
+              .help(help).build()
+      );
+
+      gauge.labelValues(lblVals.toArray(new String[0]))
+          .set(value.doubleValue());
+    }
+  }
+
+  private static void fillNodesMetrics(MetricsRegistry registry, ScrapedClusterState newState) {
+    registry.gauge(
+        "broker_count",
+        "Number of brokers in the Kafka cluster",
+        List.of(),
+        List.of(),
+        newState.getNodesStates().size()
+    );
+
+    newState.getNodesStates().forEach((nodeId, state) -> {
+      if (state.segmentStats() != null) {
+        registry.gauge(
+            "broker_bytes_disk",
+            "Written disk size in bytes of a broker",
+            List.of(NODE_ID_TAG),
+            List.of(nodeId.toString()),
+            state.segmentStats().getSegmentSize()
+        );
+      }
+      if (state.logDirSpaceStats() != null) {
+        if (state.logDirSpaceStats().usableBytes() != null) {
+          registry.gauge(
+              "broker_bytes_usable",
+              "Usable disk size in bytes of a broker",
+              List.of(NODE_ID_TAG),
+              List.of(nodeId.toString()),
+              state.logDirSpaceStats().usableBytes()
+          );
+        }
+        if (state.logDirSpaceStats().totalBytes() != null) {
+          registry.gauge(
+              "broker_bytes_total",
+              "Total disk size in bytes of a broker",
+              List.of(NODE_ID_TAG),
+              List.of(nodeId.toString()),
+              state.logDirSpaceStats().totalBytes()
+          );
+        }
+      }
+    });
+  }
+
+  private static void fillTopicMetrics(MetricsRegistry registry, ScrapedClusterState clusterState) {
+    registry.gauge(
+        "topic_count",
+        "Number of topics in the Kafka cluster",
+        List.of(),
+        List.of(),
+        clusterState.getTopicStates().size()
+    );
+
+    clusterState.getTopicStates().forEach((topicName, state) -> {
+      registry.gauge(
+          "kafka_topic_partitions",
+          "Number of partitions for this Topic",
+          List.of(TOPIC_TAG),
+          List.of(topicName),
+          state.description().partitions().size()
+      );
+      state.endOffsets().forEach((partition, endOffset) -> registry.gauge(
+          "kafka_topic_partition_next_offset",
+          "Current (next) Offset of a Broker at Topic/Partition",
+          List.of(TOPIC_TAG, PARTITION_TAG),
+          List.of(topicName, String.valueOf(partition)),
+          endOffset
+      ));
+      state.startOffsets().forEach((partition, startOffset) -> registry.gauge(
+          "kafka_topic_partition_oldest_offset",
+          "Oldest Offset of a Broker at Topic/Partition",
+          List.of(TOPIC_TAG, PARTITION_TAG),
+          List.of(topicName, String.valueOf(partition)),
+          startOffset
+      ));
+      state.description().partitions().forEach(p -> {
+        registry.gauge(
+            "kafka_topic_partition_in_sync_replica",
+            "Number of In-Sync Replicas for this Topic/Partition",
+            List.of(TOPIC_TAG, PARTITION_TAG),
+            List.of(topicName, String.valueOf(p.partition())),
+            p.isr().size()
+        );
+        registry.gauge(
+            "kafka_topic_partition_replicas",
+            "Number of Replicas for this Topic/Partition",
+            List.of(TOPIC_TAG, PARTITION_TAG),
+            List.of(topicName, String.valueOf(p.partition())),
+            p.replicas().size()
+        );
+        registry.gauge(
+            "kafka_topic_partition_leader",
+            "Leader Broker ID of this Topic/Partition (-1, if no leader)",
+            List.of(TOPIC_TAG, PARTITION_TAG),
+            List.of(topicName, String.valueOf(p.partition())),
+            Optional.ofNullable(p.leader()).map(Node::id).orElse(-1)
+        );
+      });
+      if (state.segmentStats() != null) {
+        registry.gauge(
+            "topic_bytes_disk",
+            "Disk size in bytes of a topic",
+            List.of(TOPIC_TAG),
+            List.of(topicName),
+            state.segmentStats().getSegmentSize()
+        );
+      }
+    });
+  }
leader)", + List.of(TOPIC_TAG, PARTITION_TAG), + List.of(topicName, String.valueOf(p.partition())), + Optional.ofNullable(p.leader()).map(Node::id).orElse(-1) + ); + }); + if (state.segmentStats() != null) { + registry.gauge( + "topic_bytes_disk", + "Disk size in bytes of a topic", + List.of(TOPIC_TAG), + List.of(topicName), + state.segmentStats().getSegmentSize() + ); + } + }); + } + + private static void fillConsumerGroupsMetrics(MetricsRegistry registry, ScrapedClusterState clusterState) { + registry.gauge( + "group_count", + "Number of consumer groups in the Kafka cluster", + List.of(), + List.of(), + clusterState.getConsumerGroupsStates().size() + ); + + clusterState.getConsumerGroupsStates().forEach((groupName, state) -> { + registry.gauge( + "group_state", + "State of the consumer group, value = ordinal of org.apache.kafka.common.ConsumerGroupState", + List.of(GROUP_TAG), + List.of(groupName), + state.description().state().ordinal() + ); + registry.gauge( + "group_member_count", + "Number of member assignments in the consumer group.", + List.of(GROUP_TAG), + List.of(groupName), + state.description().members().size() + ); + registry.gauge( + "group_host_count", + "Number of distinct hosts in the consumer group.", + List.of(GROUP_TAG), + List.of(groupName), + state.description().members().stream().map(MemberDescription::host).distinct().count() + ); + + state.committedOffsets().forEach((tp, committedOffset) -> { + registry.gauge( + "kafka_consumergroup_current_offset", + "Current Offset of a ConsumerGroup at Topic/Partition", + List.of("consumergroup", TOPIC_TAG, PARTITION_TAG), + List.of(groupName, tp.topic(), String.valueOf(tp.partition())), + committedOffset + ); + + Optional.ofNullable(clusterState.getTopicStates().get(tp.topic())) + .flatMap(s -> Optional.ofNullable(s.endOffsets().get(tp.partition()))) + .ifPresent(endOffset -> + registry.gauge( + "kafka_consumergroup_lag", + "Current Approximate Lag of a ConsumerGroup at Topic/Partition", + List.of("consumergroup", TOPIC_TAG, PARTITION_TAG), + List.of(groupName, tp.topic(), String.valueOf(tp.partition())), + endOffset - committedOffset + )); + + }); + }); + } +} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/JmxMetricsFormatter.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsFormatter.java similarity index 73% rename from api/src/main/java/io/kafbat/ui/service/metrics/JmxMetricsFormatter.java rename to api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsFormatter.java index 37323c7dd..c445434cb 100644 --- a/api/src/main/java/io/kafbat/ui/service/metrics/JmxMetricsFormatter.java +++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsFormatter.java @@ -1,5 +1,6 @@ -package io.kafbat.ui.service.metrics; +package io.kafbat.ui.service.metrics.scrape.jmx; +import io.kafbat.ui.service.metrics.RawMetric; import java.math.BigDecimal; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -13,15 +14,19 @@ /** * Converts JMX metrics into JmxExporter prometheus format: format. 
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsRetriever.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsRetriever.java
new file mode 100644
index 000000000..e5984725d
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsRetriever.java
@@ -0,0 +1,147 @@
+package io.kafbat.ui.service.metrics.scrape.jmx;
+
+import io.kafbat.ui.model.MetricsScrapeProperties;
+import io.kafbat.ui.service.metrics.RawMetric;
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+import lombok.SneakyThrows;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.common.Node;
+import org.springframework.stereotype.Component;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Schedulers;
+
+
+@Component // needs to be a component, since it is closeable
+@Slf4j
+public class JmxMetricsRetriever implements Closeable {
+
+  private static final boolean SSL_JMX_SUPPORTED;
+
+  static {
+    // see JmxSslSocketFactory doc for details
+    SSL_JMX_SUPPORTED = JmxSslSocketFactory.initialized();
+  }
+
+  private static final String JMX_URL = "service:jmx:rmi:///jndi/rmi://";
+  private static final String JMX_SERVICE_TYPE = "jmxrmi";
+  private static final String CANONICAL_NAME_PATTERN = "kafka.server*:*";
+
+  @Override
+  public void close() {
+    JmxSslSocketFactory.clearFactoriesCache();
+  }
+
+  public Mono<List<RawMetric>> retrieveFromNode(MetricsScrapeProperties scrapeProperties, Node node) {
+    if (isSslJmxEndpoint(scrapeProperties) && !SSL_JMX_SUPPORTED) {
+      log.warn("Cluster has JMX over SSL configured, but it is not supported by the app");
+      return Mono.just(List.of());
+    }
+    return Mono.fromSupplier(() -> retrieveSync(scrapeProperties, node))
+        .subscribeOn(Schedulers.boundedElastic());
+  }
+
+  private boolean isSslJmxEndpoint(MetricsScrapeProperties scrapeProperties) {
+    return scrapeProperties.getKeystoreConfig() != null
+        && scrapeProperties.getKeystoreConfig().getKeystoreLocation() != null;
+  }
+
+  @SneakyThrows
+  private List<RawMetric> retrieveSync(MetricsScrapeProperties scrapeProperties, Node node) {
+    String jmxUrl = JMX_URL + node.host() + ":" + scrapeProperties.getPort() + "/" + JMX_SERVICE_TYPE;
+    log.debug("Collecting JMX metrics for {}", jmxUrl);
+    List<RawMetric> result = new ArrayList<>();
+    withJmxConnector(jmxUrl, scrapeProperties, jmxConnector -> getMetricsFromJmx(jmxConnector, result));
+    log.debug("{} metrics collected for {}", result.size(), jmxUrl);
+    return result;
+  }
+
+  private void withJmxConnector(String jmxUrl,
+                                MetricsScrapeProperties scrapeProperties,
+                                Consumer<JMXConnector> consumer) {
+    var env = prepareJmxEnvAndSetThreadLocal(scrapeProperties);
+    JMXServiceURL serviceUrl;
+    try {
+      serviceUrl = new JMXServiceURL(jmxUrl);
+    } catch (java.net.MalformedURLException e) {
+      log.error("Malformed JMX URL: {}", jmxUrl, e);
+      return;
+    }
+    try (JMXConnector connector = JMXConnectorFactory.newJMXConnector(serviceUrl, env)) {
+      if (!tryConnect(connector, env, jmxUrl)) {
+        return;
+      }
+      consumer.accept(connector);
+    } catch (Exception connectorException) {
+      log.error("Error getting jmx metrics from {}", jmxUrl, connectorException);
+    } finally {
+      JmxSslSocketFactory.clearThreadLocalContext();
+    }
+  }
+
+  private boolean tryConnect(JMXConnector connector, Map<String, Object> env, String jmxUrl) {
+    try {
+      connector.connect(env);
+      return true;
+    } catch (Exception connectException) {
+      log.error("Error connecting to {}", jmxUrl, connectException);
+      return false;
+    }
+  }
+
+  private Map<String, Object> prepareJmxEnvAndSetThreadLocal(MetricsScrapeProperties scrapeProperties) {
+    Map<String, Object> env = new HashMap<>();
+    if (isSslJmxEndpoint(scrapeProperties)) {
+      var truststoreConfig = scrapeProperties.getTruststoreConfig();
+      var keystoreConfig = scrapeProperties.getKeystoreConfig();
+      JmxSslSocketFactory.setSslContextThreadLocal(
+          truststoreConfig != null ? truststoreConfig.getTruststoreLocation() : null,
+          truststoreConfig != null ? truststoreConfig.getTruststorePassword() : null,
+          keystoreConfig != null ? keystoreConfig.getKeystoreLocation() : null,
+          keystoreConfig != null ? keystoreConfig.getKeystorePassword() : null
+      );
+      JmxSslSocketFactory.editJmxConnectorEnv(env);
+    }
+
+    if (StringUtils.isNotEmpty(scrapeProperties.getUsername())
+        && StringUtils.isNotEmpty(scrapeProperties.getPassword())) {
+      env.put(
+          JMXConnector.CREDENTIALS,
+          new String[] {scrapeProperties.getUsername(), scrapeProperties.getPassword()}
+      );
+    }
+    return env;
+  }
+
+  @SneakyThrows
+  private void getMetricsFromJmx(JMXConnector jmxConnector, List<RawMetric> sink) {
+    MBeanServerConnection msc = jmxConnector.getMBeanServerConnection();
+    var jmxMetrics = msc.queryNames(new ObjectName(CANONICAL_NAME_PATTERN), null);
+    for (ObjectName jmxMetric : jmxMetrics) {
+      sink.addAll(extractObjectMetrics(jmxMetric, msc));
+    }
+  }
+
+  @SneakyThrows
+  private List<RawMetric> extractObjectMetrics(ObjectName objectName, MBeanServerConnection msc) {
+    MBeanAttributeInfo[] attrNames = msc.getMBeanInfo(objectName).getAttributes();
+    Object[] attrValues = new Object[attrNames.length];
+    for (int i = 0; i < attrNames.length; i++) {
+      attrValues[i] = msc.getAttribute(objectName, attrNames[i].getName());
+    }
+    return JmxMetricsFormatter.constructMetricsList(objectName, attrNames, attrValues);
+  }
+
+}
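A usage sketch for a single node (scrape properties assumed to be configured for JMX):

    Mono<List<RawMetric>> rawMetrics = jmxMetricsRetriever.retrieveFromNode(scrapeProperties, node);
    // the blocking JMX work runs on the boundedElastic scheduler; connection errors are
    // logged per node and surface as an empty metrics list rather than a failed Mono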
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsScraper.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsScraper.java
new file mode 100644
index 000000000..e334325e7
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxMetricsScraper.java
@@ -0,0 +1,37 @@
+package io.kafbat.ui.service.metrics.scrape.jmx;
+
+import io.kafbat.ui.model.MetricsScrapeProperties;
+import io.kafbat.ui.service.metrics.RawMetric;
+import io.kafbat.ui.service.metrics.scrape.BrokerMetricsScraper;
+import io.kafbat.ui.service.metrics.scrape.PerBrokerScrapedMetrics;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.util.function.Tuples;
+
+public class JmxMetricsScraper implements BrokerMetricsScraper {
+
+  private final JmxMetricsRetriever jmxMetricsRetriever;
+  private final MetricsScrapeProperties scrapeProperties;
+
+  public JmxMetricsScraper(MetricsScrapeProperties scrapeProperties,
+                           JmxMetricsRetriever jmxMetricsRetriever) {
+    this.scrapeProperties = scrapeProperties;
+    this.jmxMetricsRetriever = jmxMetricsRetriever;
+  }
+
+  @Override
+  public Mono<PerBrokerScrapedMetrics> scrape(Collection<Node> nodes) {
+    Mono<Map<Integer, List<MetricSnapshot>>> collected = Flux.fromIterable(nodes)
+        .flatMap(n -> jmxMetricsRetriever.retrieveFromNode(scrapeProperties, n).map(metrics -> Tuples.of(n, metrics)))
+        .collectMap(
+            t -> t.getT1().id(),
+            t -> RawMetric.groupIntoSnapshot(t.getT2()).toList()
+        );
+    return collected.map(PerBrokerScrapedMetrics::new);
+  }
+}
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/JmxSslSocketFactory.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxSslSocketFactory.java
similarity index 95%
rename from api/src/main/java/io/kafbat/ui/service/metrics/JmxSslSocketFactory.java
rename to api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxSslSocketFactory.java
index 27ecf505b..bd20cb2e1 100644
--- a/api/src/main/java/io/kafbat/ui/service/metrics/JmxSslSocketFactory.java
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/jmx/JmxSslSocketFactory.java
@@ -1,4 +1,4 @@
-package io.kafbat.ui.service.metrics;
+package io.kafbat.ui.service.metrics.scrape.jmx;
 
 import com.google.common.base.Preconditions;
 import java.io.FileInputStream;
@@ -61,9 +61,8 @@
     } catch (Exception e) {
       log.error("----------------------------------");
       log.error("SSL can't be enabled for JMX retrieval. "
-          + "Make sure your java app run with '--add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED' arg. Err: {}",
+          + "Make sure your java app is running with '--add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED' arg. Err: {}",
           e.getMessage());
-      log.trace("SSL can't be enabled for JMX retrieval", e);
       log.error("----------------------------------");
     }
     SSL_JMX_SUPPORTED = sslJmxSupported;
@@ -166,7 +165,7 @@ public Socket createSocket(String host, int port) throws IOException {
     return defaultSocketFactory.createSocket(host, port);
   }
 
-  /// FOLLOWING METHODS WON'T BE USED DURING JMX INTERACTION, IMPLEMENTING THEM JUST FOR CONSISTENCY ->>>>>
+  // THE FOLLOWING METHODS WON'T BE USED DURING JMX INTERACTION, IMPLEMENTING THEM JUST FOR CONSISTENCY ->>>>>
 
   @Override
   public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException {
@@ -178,7 +177,7 @@
   @Override
   public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
-      throws IOException, UnknownHostException {
+      throws IOException {
     if (threadLocalContextSet()) {
       return createFactoryFromThreadLocalCtx().createSocket(host, port, localHost, localPort);
     }
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetriever.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetriever.java
new file mode 100644
index 000000000..6a53ad9b1
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetriever.java
@@ -0,0 +1,56 @@
+package io.kafbat.ui.service.metrics.scrape.prometheus;
+
+import io.kafbat.ui.model.MetricsScrapeProperties;
+import io.kafbat.ui.util.WebClientConfigurator;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.List;
+import java.util.Optional;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.unit.DataSize;
+import org.springframework.web.reactive.function.client.WebClient;
+import org.springframework.web.util.UriComponentsBuilder;
+import reactor.core.publisher.Mono;
+
+@Slf4j
+class PrometheusMetricsRetriever {
+
+  private static final String METRICS_ENDPOINT_PATH = "/metrics";
+  private static final int DEFAULT_EXPORTER_PORT = 11001;
+
+  private final int port;
+  private final boolean sslEnabled;
+  private final WebClient webClient;
+
+  PrometheusMetricsRetriever(MetricsScrapeProperties scrapeProperties) {
+    this.port = Optional.ofNullable(scrapeProperties.getPort()).orElse(DEFAULT_EXPORTER_PORT);
+    this.sslEnabled = scrapeProperties.isSsl() || scrapeProperties.getKeystoreConfig() != null;
+    this.webClient = new WebClientConfigurator()
+        .configureBufferSize(DataSize.ofMegabytes(20))
+        .configureBasicAuth(scrapeProperties.getUsername(), scrapeProperties.getPassword())
+        .configureSsl(scrapeProperties.getTruststoreConfig(), scrapeProperties.getKeystoreConfig())
+        .build();
+  }
+
+  Mono<List<MetricSnapshot>> retrieve(String host) {
+    log.debug("Retrieving metrics from prometheus endpoint: {}:{}", host, port);
+
+    var uri = UriComponentsBuilder.newInstance()
+        .scheme(sslEnabled ? "https" : "http")
+        .host(host)
+        .port(port)
+        .path(METRICS_ENDPOINT_PATH)
+        .build()
+        .toUri();
+
+    return webClient.get()
+        .uri(uri)
+        .retrieve()
+        .bodyToMono(String.class)
+        .doOnError(e -> log.error("Error while getting metrics from {}", host, e))
+        .map(body -> new PrometheusTextFormatParser().parse(body))
+        .onErrorResume(th -> {
+          log.warn("Error while getting prometheus metrics from {}", host, th);
+          return Mono.just(List.of());
+        });
+  }
+}
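For reference, a usage sketch of the retriever (the endpoint shown is what the code above composes, assuming default settings):

    Mono<List<MetricSnapshot>> snapshots = retriever.retrieve("kafka-1");
    // resolves to GET http://kafka-1:11001/metrics with the default exporter port
    // (https when ssl or a keystore is configured); failures degrade to an empty list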
"https" : "http") + .host(host) + .port(port) + .path(METRICS_ENDPOINT_PATH) + .build() + .toUri(); + + return webClient.get() + .uri(uri) + .retrieve() + .bodyToMono(String.class) + .doOnError(e -> log.error("Error while getting metrics from {}", host, e)) + .map(body -> new PrometheusTextFormatParser().parse(body)) + .onErrorResume(th -> { + log.warn("Error while getting prometheus metrics from {}", host, th); + return Mono.just(List.of()); + }); + } +} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusScraper.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusScraper.java new file mode 100644 index 000000000..d2eae43f7 --- /dev/null +++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusScraper.java @@ -0,0 +1,31 @@ +package io.kafbat.ui.service.metrics.scrape.prometheus; + +import io.kafbat.ui.model.MetricsScrapeProperties; +import io.kafbat.ui.service.metrics.scrape.BrokerMetricsScraper; +import io.kafbat.ui.service.metrics.scrape.PerBrokerScrapedMetrics; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import org.apache.kafka.common.Node; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.util.function.Tuple2; +import reactor.util.function.Tuples; + +public class PrometheusScraper implements BrokerMetricsScraper { + + private final PrometheusMetricsRetriever retriever; + + public PrometheusScraper(MetricsScrapeProperties scrapeProperties) { + this.retriever = new PrometheusMetricsRetriever(scrapeProperties); + } + + @Override + public Mono scrape(Collection clusterNodes) { + Mono>> collected = Flux.fromIterable(clusterNodes) + .flatMap(n -> retriever.retrieve(n.host()).map(metrics -> Tuples.of(n, metrics))) + .collectMap(t -> t.getT1().id(), Tuple2::getT2); + return collected.map(PerBrokerScrapedMetrics::new); + } +} diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusTextFormatParser.java b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusTextFormatParser.java new file mode 100644 index 000000000..9b7571cf6 --- /dev/null +++ b/api/src/main/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusTextFormatParser.java @@ -0,0 +1,409 @@ +package io.kafbat.ui.service.metrics.scrape.prometheus; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import io.prometheus.metrics.model.snapshots.ClassicHistogramBuckets; +import io.prometheus.metrics.model.snapshots.CounterSnapshot; +import io.prometheus.metrics.model.snapshots.CounterSnapshot.CounterDataPointSnapshot; +import io.prometheus.metrics.model.snapshots.GaugeSnapshot; +import io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot; +import io.prometheus.metrics.model.snapshots.HistogramSnapshot; +import io.prometheus.metrics.model.snapshots.HistogramSnapshot.HistogramDataPointSnapshot; +import io.prometheus.metrics.model.snapshots.Labels; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; +import io.prometheus.metrics.model.snapshots.MetricSnapshots; +import io.prometheus.metrics.model.snapshots.PrometheusNaming; +import io.prometheus.metrics.model.snapshots.Quantile; +import io.prometheus.metrics.model.snapshots.Quantiles; +import io.prometheus.metrics.model.snapshots.SummarySnapshot; +import io.prometheus.metrics.model.snapshots.UnknownSnapshot; +import 
+
+  private static class ParsingContext {
+
+    private final List<MetricSnapshot> sink;
+
+    private String currentMetricName;
+    private String currentHelp;
+    private MetricDataPointsAccumulator dataPoints;
+
+    ParsingContext(List<MetricSnapshot> sink) {
+      this.sink = sink;
+    }
+
+    private void reset() {
+      currentMetricName = null;
+      currentHelp = null;
+      dataPoints = null;
+    }
+
+    void metricNameAndType(String metricName, MetricType metricType) {
+      if (!metricName.equals(currentMetricName)) {
+        flushAndReset();
+      }
+      currentMetricName = metricName;
+      dataPoints = switch (metricType) {
+        case UNTYPED -> new UntypedDataPointsAccumulator();
+        case GAUGE -> new GaugeDataPointsAccumulator();
+        case COUNTER -> new CounterDataPointsAccumulator(metricName);
+        case HISTOGRAM -> new HistogramDataPointsAccumulator(metricName);
+        case SUMMARY -> new SummaryDataPointsAccumulator(metricName);
+      };
+    }
+
+    void metricNameAndHelp(String metricName, String help) {
+      if (!metricName.equals(currentMetricName)) {
+        flushAndReset();
+      }
+      currentMetricName = metricName;
+      currentHelp = help;
+    }
+
+    void dataPoint(ParsedDataPoint parsedDataPoint) {
+      if (currentMetricName == null) {
+        currentMetricName = PrometheusNaming.sanitizeMetricName(parsedDataPoint.name);
+      }
+      if (dataPoints == null) {
+        dataPoints = new UntypedDataPointsAccumulator();
+      }
+      if (!dataPoints.add(parsedDataPoint)) {
+        flushAndReset();
+        dataPoint(parsedDataPoint);
+      }
+    }
+
+    void flushAndReset() {
+      if (dataPoints != null) {
+        dataPoints.buildSnapshot(currentMetricName, currentHelp)
+            .ifPresent(sink::add);
+      }
+      reset();
+    }
+  }
+
+  interface MetricDataPointsAccumulator {
+    boolean add(ParsedDataPoint parsedDataPoint);
+
+    Optional<MetricSnapshot> buildSnapshot(String name, @Nullable String help);
+  }
+
+  static class UntypedDataPointsAccumulator implements MetricDataPointsAccumulator {
+
+    final List<UnknownDataPointSnapshot> dataPoints = new ArrayList<>();
+    String name;
+
+    @Override
+    public boolean add(ParsedDataPoint dp) {
+      if (name == null) {
+        name = dp.name;
+      } else if (!name.equals(dp.name)) {
+        return false;
+      }
+      dataPoints.add(
+          UnknownDataPointSnapshot.builder()
+              .labels(dp.labels).value(dp.value).scrapeTimestampMillis(dp.scrapedAt).build());
+      return true;
+    }
+
+    @Override
+    public Optional<MetricSnapshot> buildSnapshot(String name, @Nullable String help) {
+      if (dataPoints.isEmpty()) {
+        return Optional.empty();
+      }
+      var builder = UnknownSnapshot.builder().name(name).help(help);
+      dataPoints.forEach(builder::dataPoint);
+      return Optional.of(builder.build());
+    }
+  }
+
+  static class GaugeDataPointsAccumulator implements MetricDataPointsAccumulator {
+
+    final List<GaugeDataPointSnapshot> dataPoints = new ArrayList<>();
+
+    @Override
+    public boolean add(ParsedDataPoint dp) {
+      dataPoints.add(
+          GaugeDataPointSnapshot.builder()
+              .labels(dp.labels).value(dp.value).scrapeTimestampMillis(dp.scrapedAt).build());
+      return true;
+    }
+
+    @Override
+    public Optional<MetricSnapshot> buildSnapshot(String name, @Nullable String help) {
+      if (dataPoints.isEmpty()) {
+        return Optional.empty();
+      }
+      var builder = GaugeSnapshot.builder().name(name).help(help);
+      dataPoints.forEach(builder::dataPoint);
+      return Optional.of(builder.build());
+    }
+  }
+
+  static class CounterDataPointsAccumulator extends UntypedDataPointsAccumulator {
+
+    final List<CounterDataPointSnapshot> counterDataPoints = new ArrayList<>();
+
+    public CounterDataPointsAccumulator(String name) {
+      this.name = name;
+    }
+
+    @Override
+    public boolean add(ParsedDataPoint dp) {
+      if (!dp.name.equals(name + "_total")) {
+        return false;
+      }
+      counterDataPoints.add(
+          CounterDataPointSnapshot.builder()
+              .labels(dp.labels).value(dp.value).scrapeTimestampMillis(dp.scrapedAt).build());
+      return true;
+    }
+
+    @Override
+    public Optional<MetricSnapshot> buildSnapshot(String name, @Nullable String help) {
+      if (counterDataPoints.isEmpty()) {
+        return Optional.empty();
+      }
+      var builder = CounterSnapshot.builder().name(name).help(help);
+      counterDataPoints.forEach(builder::dataPoint);
+      return Optional.of(builder.build());
+    }
+  }
+
+  @RequiredArgsConstructor
+  static class HistogramDataPointsAccumulator implements MetricDataPointsAccumulator {
+
+    // contains cumulative(!) counts
+    record Bucket(double le, long count) implements Comparable<Bucket> {
+      @Override
+      public int compareTo(@NotNull Bucket o) {
+        return Double.compare(le, o.le);
+      }
+    }
+
+    final String name;
+    final Map<Labels, Double> sums = new HashMap<>();
+    final Multimap<Labels, Bucket> buckets = HashMultimap.create();
+
+    @Override
+    public boolean add(ParsedDataPoint dp) {
+      if (dp.name.equals(name + "_bucket") && dp.labels.contains("le")) {
+        var histLbls = rmLabel(dp.labels, "le");
+        buckets.put(histLbls, new Bucket(parseDouble(dp.labels.get("le")), (long) dp.value));
+        return true;
+      }
+      if (dp.name.equals(name + "_count")) {
+        return true; // skipping counts
+      }
+      if (dp.name.equals(name + "_sum")) {
+        sums.put(dp.labels, dp.value);
+        return true;
+      }
+      return false;
+    }
+
+    @Override
+    public Optional<MetricSnapshot> buildSnapshot(String name, @Nullable String help) {
+      if (buckets.isEmpty()) {
+        return Optional.empty();
+      }
+      var builder = HistogramSnapshot.builder().name(name).help(help);
+      buckets.asMap().forEach((labels, localBuckets) -> {
+        localBuckets = localBuckets.stream().sorted().toList();
+        long prevCount = 0;
+        var nonCumulativeBuckets = new ArrayList<Bucket>();
+        for (Bucket b : localBuckets) {
+          nonCumulativeBuckets.add(new Bucket(b.le, b.count - prevCount));
+          prevCount = b.count;
+        }
+        builder.dataPoint(
+            HistogramDataPointSnapshot.builder()
+                .labels(labels)
+                .classicHistogramBuckets(
+                    ClassicHistogramBuckets.of(
+                        nonCumulativeBuckets.stream().map(b -> b.le).toList(),
+                        nonCumulativeBuckets.stream().map(b -> b.count).toList()
+                    )
+                )
+                .sum(sums.getOrDefault(labels, Double.NaN))
+                .build()
+        );
+      });
+      return Optional.of(builder.build());
+    }
+  }
ArrayList<>(); + + public CounterDataPointsAccumulator(String name) { + this.name = name; + } + + @Override + public boolean add(ParsedDataPoint dp) { + if (!dp.name.equals(name + "_total")) { + return false; + } + counterDataPoints.add( + CounterDataPointSnapshot.builder() + .labels(dp.labels).value(dp.value).scrapeTimestampMillis(dp.scrapedAt).build()); + return true; + } + + @Override + public Optional buildSnapshot(String name, @Nullable String help) { + if (counterDataPoints.isEmpty()) { + return Optional.empty(); + } + var builder = CounterSnapshot.builder().name(name).help(help); + counterDataPoints.forEach(builder::dataPoint); + return Optional.of(builder.build()); + } + } + + @RequiredArgsConstructor + static class HistogramDataPointsAccumulator implements MetricDataPointsAccumulator { + + //contains cumulative(!) counts + record Bucket(double le, long count) implements Comparable { + @Override + public int compareTo(@NotNull Bucket o) { + return Double.compare(le, o.le); + } + } + + final String name; + final Map sums = new HashMap<>(); + final Multimap buckets = HashMultimap.create(); + + @Override + public boolean add(ParsedDataPoint dp) { + if (dp.name.equals(name + "_bucket") && dp.labels.contains("le")) { + var histLbls = rmLabel(dp.labels, "le"); + buckets.put(histLbls, new Bucket(parseDouble(dp.labels.get("le")), (long) dp.value)); + return true; + } + if (dp.name.equals(name + "_count")) { + return true; //skipping counts + } + if (dp.name.equals(name + "_sum")) { + sums.put(dp.labels, dp.value); + return true; + } + return false; + } + + @Override + public Optional buildSnapshot(String name, @Nullable String help) { + if (buckets.isEmpty()) { + return Optional.empty(); + } + var builder = HistogramSnapshot.builder().name(name).help(help); + buckets.asMap().forEach((labels, localBuckets) -> { + localBuckets = localBuckets.stream().sorted().toList(); + long prevCount = 0; + var nonCumulativeBuckets = new ArrayList(); + for (Bucket b : localBuckets) { + nonCumulativeBuckets.add(new Bucket(b.le, b.count - prevCount)); + prevCount = b.count; + } + builder.dataPoint( + HistogramDataPointSnapshot.builder() + .labels(labels) + .classicHistogramBuckets( + ClassicHistogramBuckets.of( + nonCumulativeBuckets.stream().map(b -> b.le).toList(), + nonCumulativeBuckets.stream().map(b -> b.count).toList() + ) + ) + .sum(sums.getOrDefault(labels, Double.NaN)) + .build() + ); + }); + return Optional.of(builder.build()); + } + } + + @RequiredArgsConstructor + static class SummaryDataPointsAccumulator implements MetricDataPointsAccumulator { + + final String name; + final Map sums = new HashMap<>(); + final Map counts = new HashMap<>(); + final Multimap quantiles = HashMultimap.create(); + + @Override + public boolean add(ParsedDataPoint dp) { + if (dp.name.equals(name) && dp.labels.contains(QUANTILE_LABEL)) { + var histLbls = rmLabel(dp.labels, QUANTILE_LABEL); + quantiles.put(histLbls, new Quantile(parseDouble(dp.labels.get(QUANTILE_LABEL)), dp.value)); + return true; + } + if (dp.name.equals(name + "_count")) { + counts.put(dp.labels, (long) dp.value); + return true; + } + if (dp.name.equals(name + "_sum")) { + sums.put(dp.labels, dp.value); + return true; + } + return false; + } + + @Override + public Optional buildSnapshot(String name, @Nullable String help) { + if (quantiles.isEmpty()) { + return Optional.empty(); + } + var builder = SummarySnapshot.builder().name(name).help(help); + quantiles.asMap().forEach((labels, localQuantiles) -> { + builder.dataPoint( + 
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/sink/MetricsSink.java b/api/src/main/java/io/kafbat/ui/service/metrics/sink/MetricsSink.java
new file mode 100644
index 000000000..900882f1d
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/sink/MetricsSink.java
@@ -0,0 +1,43 @@
+package io.kafbat.ui.service.metrics.sink;
+
+import static org.springframework.util.StringUtils.hasText;
+
+import io.kafbat.ui.config.ClustersProperties;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public interface MetricsSink {
+
+  static MetricsSink create(ClustersProperties.Cluster cluster) {
+    List<MetricsSink> sinks = new ArrayList<>();
+    Optional.ofNullable(cluster.getMetrics())
+        .flatMap(metrics -> Optional.ofNullable(metrics.getStore()))
+        .flatMap(store -> Optional.ofNullable(store.getPrometheus()))
+        .ifPresent(prometheusConf -> {
+          if (hasText(prometheusConf.getPushGatewayUrl())) {
+            sinks.add(
+                PrometheusPushGatewaySink.create(
+                    prometheusConf.getPushGatewayUrl(),
+                    prometheusConf.getPushGatewayUsername(),
+                    prometheusConf.getPushGatewayPassword()
+                ));
+          }
+        });
+    return compoundSink(sinks);
+  }
+
+  private static MetricsSink compoundSink(List<MetricsSink> sinks) {
+    return metricsFlux ->
+        Flux.fromIterable(sinks)
+            .flatMap(sink -> sink.send(metricsFlux))
+            .then();
+  }
+
+  Mono<Void> send(Flux<MetricSnapshot> metrics);
+
+}
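The compound sink fans a shared metrics flux out to every configured sink. A sketch of how a push-gateway sink would get picked up from cluster properties (the YAML key below assumes Spring's relaxed binding of pushGatewayUrl and is illustrative):

    // kafka.clusters[0].metrics.store.prometheus.push-gateway-url: http://pushgw:9091
    MetricsSink sink = MetricsSink.create(cluster);
    Mono<Void> done = sink.send(Flux.fromIterable(snapshots));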
diff --git a/api/src/main/java/io/kafbat/ui/service/metrics/sink/PrometheusPushGatewaySink.java b/api/src/main/java/io/kafbat/ui/service/metrics/sink/PrometheusPushGatewaySink.java
new file mode 100644
index 000000000..16a66a931
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/service/metrics/sink/PrometheusPushGatewaySink.java
@@ -0,0 +1,47 @@
+package io.kafbat.ui.service.metrics.sink;
+
+import static org.springframework.util.StringUtils.hasText;
+
+import io.prometheus.metrics.exporter.pushgateway.PushGateway;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import io.prometheus.metrics.model.snapshots.MetricSnapshots;
+import jakarta.annotation.Nullable;
+import java.util.List;
+import lombok.RequiredArgsConstructor;
+import lombok.SneakyThrows;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Schedulers;
+
+@RequiredArgsConstructor
+class PrometheusPushGatewaySink implements MetricsSink {
+
+  private final PushGateway pushGateway;
+
+  @SneakyThrows
+  static PrometheusPushGatewaySink create(String url,
+                                          @Nullable String username,
+                                          @Nullable String passw) {
+    PushGateway.Builder builder = PushGateway.builder()
+        .address(url);
+
+    if (hasText(username) && hasText(passw)) {
+      builder.basicAuth(username, passw);
+    }
+    return new PrometheusPushGatewaySink(builder.build());
+  }
+
+  @Override
+  public Mono<Void> send(Flux<MetricSnapshot> metrics) {
+    return metrics.collectList()
+        .filter(lst -> !lst.isEmpty())
+        .doOnNext(this::pushSync)
+        .subscribeOn(Schedulers.boundedElastic())
+        .then();
+  }
+
+  @SneakyThrows
+  private void pushSync(List<MetricSnapshot> metricsToPush) {
+    pushGateway.push(() -> new MetricSnapshots(metricsToPush));
+  }
+}
diff --git a/api/src/main/java/io/kafbat/ui/util/DynamicConfigOperations.java b/api/src/main/java/io/kafbat/ui/util/DynamicConfigOperations.java
index df0ac5426..e4028a1c0 100644
--- a/api/src/main/java/io/kafbat/ui/util/DynamicConfigOperations.java
+++ b/api/src/main/java/io/kafbat/ui/util/DynamicConfigOperations.java
@@ -208,7 +208,7 @@ private String serializeToYaml(PropertiesStructure props) {

   @Data
   @Builder
-  // field name should be in sync with @ConfigurationProperties annotation
+  // the field name should be in sync with @ConfigurationProperties annotation
   public static class PropertiesStructure {

     private ClustersProperties kafka;
diff --git a/api/src/main/java/io/kafbat/ui/util/KafkaClientSslPropertiesUtil.java b/api/src/main/java/io/kafbat/ui/util/KafkaClientSslPropertiesUtil.java
index 324e2e4d0..384888aa1 100644
--- a/api/src/main/java/io/kafbat/ui/util/KafkaClientSslPropertiesUtil.java
+++ b/api/src/main/java/io/kafbat/ui/util/KafkaClientSslPropertiesUtil.java
@@ -31,5 +31,4 @@ public static void addKafkaSslProperties(@Nullable ClustersProperties.Truststore
     }
   }

-
 }
diff --git a/api/src/main/java/io/kafbat/ui/util/KafkaServicesValidation.java b/api/src/main/java/io/kafbat/ui/util/KafkaServicesValidation.java
index 019a33543..1871dbcc1 100644
--- a/api/src/main/java/io/kafbat/ui/util/KafkaServicesValidation.java
+++ b/api/src/main/java/io/kafbat/ui/util/KafkaServicesValidation.java
@@ -4,6 +4,7 @@

 import io.kafbat.ui.connect.api.KafkaConnectClientApi;
 import io.kafbat.ui.model.ApplicationPropertyValidationDTO;
+import io.kafbat.ui.prometheus.api.PrometheusClientApi;
 import io.kafbat.ui.service.ReactiveAdminClient;
 import io.kafbat.ui.service.ksql.KsqlApiClient;
 import io.kafbat.ui.sr.api.KafkaSrClientApi;
@@ -140,5 +141,18 @@ public static Mono<ApplicationPropertyValidationDTO> validateKsql(
         .onErrorResume(KafkaServicesValidation::invalid);
   }

+  public static Mono<ApplicationPropertyValidationDTO> validatePrometheusStore(
+      Supplier<ReactiveFailover<PrometheusClientApi>> clientSupplier) {
+    ReactiveFailover<PrometheusClientApi> client;
+    try {
+      client = clientSupplier.get();
+    } catch (Exception e) {
+      log.error("Error creating Prometheus client", e);
+      return invalid("Error creating Prometheus client: " + e.getMessage());
+    }
+    return client.mono(c -> c.query("1", null, null))
+        .then(valid())
+        .onErrorResume(KafkaServicesValidation::invalid);
+  }
 }
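Note: the Prometheus-store validation above boils down to one cheap instant query, evaluating the constant PromQL expression "1". A rough standalone equivalent of that probe (a sketch assuming plain HTTP and no auth, not the generated PrometheusClientApi):

    import org.springframework.web.reactive.function.client.WebClient;
    import reactor.core.publisher.Mono;

    class PrometheusProbeSketch {
      // GET <base>/api/v1/query?query=1 answers {"status":"success",...}
      // on any healthy Prometheus-compatible endpoint
      static Mono<Boolean> reachable(String baseUrl) {
        return WebClient.create(baseUrl)
            .get()
            .uri(b -> b.path("/api/v1/query").queryParam("query", "1").build())
            .retrieve()
            .bodyToMono(String.class)
            .map(body -> body.contains("\"status\":\"success\""))
            .onErrorReturn(false);
      }
    }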
diff --git a/api/src/main/java/io/kafbat/ui/util/MetricsUtils.java b/api/src/main/java/io/kafbat/ui/util/MetricsUtils.java
new file mode 100644
index 000000000..3b4a22024
--- /dev/null
+++ b/api/src/main/java/io/kafbat/ui/util/MetricsUtils.java
@@ -0,0 +1,114 @@
+package io.kafbat.ui.util;
+
+import static io.prometheus.metrics.model.snapshots.CounterSnapshot.CounterDataPointSnapshot;
+import static io.prometheus.metrics.model.snapshots.HistogramSnapshot.HistogramDataPointSnapshot;
+import static io.prometheus.metrics.model.snapshots.SummarySnapshot.SummaryDataPointSnapshot;
+
+import io.prometheus.metrics.model.snapshots.CounterSnapshot;
+import io.prometheus.metrics.model.snapshots.DataPointSnapshot;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot;
+import io.prometheus.metrics.model.snapshots.HistogramSnapshot;
+import io.prometheus.metrics.model.snapshots.Labels;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import io.prometheus.metrics.model.snapshots.SummarySnapshot;
+import io.prometheus.metrics.model.snapshots.UnknownSnapshot;
+import io.prometheus.metrics.model.snapshots.UnknownSnapshot.UnknownDataPointSnapshot;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Stream;
+
+public final class MetricsUtils {
+
+  private MetricsUtils() {
+  }
+
+  public static double readPointValue(DataPointSnapshot dps) {
+    return switch (dps) {
+      case UnknownDataPointSnapshot unknown -> unknown.getValue();
+      case GaugeDataPointSnapshot gauge -> gauge.getValue();
+      case CounterDataPointSnapshot counter -> counter.getValue();
+      default -> 0;
+    };
+  }
+
+  public static MetricSnapshot appendLabel(MetricSnapshot md, String name, String value) {
+    return switch (md) {
+      case UnknownSnapshot unknown -> new UnknownSnapshot(unknown.getMetadata(), unknown.getDataPoints()
+          .stream().map(dp ->
+              new UnknownDataPointSnapshot(
+                  dp.getValue(),
+                  extendLabels(dp.getLabels(), name, value),
+                  dp.getExemplar(),
+                  dp.getScrapeTimestampMillis()
+              )
+          ).toList()
+      );
+      case GaugeSnapshot gauge -> new GaugeSnapshot(gauge.getMetadata(), gauge.getDataPoints()
+          .stream().map(dp ->
+              new GaugeDataPointSnapshot(
+                  dp.getValue(),
+                  extendLabels(dp.getLabels(), name, value),
+                  dp.getExemplar()
+              )
+          ).toList());
+      case CounterSnapshot counter -> new CounterSnapshot(counter.getMetadata(), counter.getDataPoints()
+          .stream().map(dp ->
+              new CounterDataPointSnapshot(
+                  dp.getValue(),
+                  extendLabels(dp.getLabels(), name, value),
+                  dp.getExemplar(),
+                  dp.getCreatedTimestampMillis(),
+                  dp.getScrapeTimestampMillis()
+              )
+          ).toList());
+      case HistogramSnapshot histogram -> new HistogramSnapshot(histogram.getMetadata(), histogram.getDataPoints()
+          .stream().map(dp ->
+              new HistogramDataPointSnapshot(
+                  dp.getClassicBuckets(),
+                  dp.getSum(),
+                  extendLabels(dp.getLabels(), name, value),
+                  dp.getExemplars(),
+                  dp.getCreatedTimestampMillis()
+              )
+          ).toList());
+      case SummarySnapshot summary -> new SummarySnapshot(summary.getMetadata(), summary.getDataPoints()
+          .stream().map(dp ->
+              new SummaryDataPointSnapshot(
+                  dp.getCount(),
+                  dp.getSum(),
+                  dp.getQuantiles(),
+                  extendLabels(dp.getLabels(), name, value),
+                  dp.getExemplars(),
+                  dp.getCreatedTimestampMillis()
+              )
+          ).toList());
+      default -> md;
+    };
+  }
+
+  @SuppressWarnings("unchecked")
+  public static MetricSnapshot concatDataPoints(MetricSnapshot d1, MetricSnapshot d2) {
+    List<? extends DataPointSnapshot> dataPoints = Stream.concat(
+        d1.getDataPoints().stream(), d2.getDataPoints().stream()
+    ).toList();
+
+    return switch (d1) {
+      case UnknownSnapshot u -> new UnknownSnapshot(u.getMetadata(),
+          (Collection<UnknownDataPointSnapshot>) dataPoints);
+      case GaugeSnapshot g -> new GaugeSnapshot(g.getMetadata(),
+          (Collection<GaugeDataPointSnapshot>) dataPoints);
+      case CounterSnapshot c -> new CounterSnapshot(c.getMetadata(),
+          (Collection<CounterDataPointSnapshot>) dataPoints);
+      case HistogramSnapshot h -> new HistogramSnapshot(h.getMetadata(),
+          (Collection<HistogramDataPointSnapshot>) dataPoints);
+      case SummarySnapshot s -> new SummarySnapshot(s.getMetadata(),
+          (Collection<SummaryDataPointSnapshot>) dataPoints);
+      default -> d1;
+    };
+  }
+
+  private static Labels extendLabels(Labels labels, String name, String value) {
+    return labels.add(name, value);
+  }
+}
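Note: snapshots in prometheus-metrics are immutable, which is why appendLabel above rebuilds every data point instead of mutating it. A usage sketch (metric name and values are illustrative):

    import io.kafbat.ui.util.MetricsUtils;
    import io.prometheus.metrics.model.snapshots.GaugeSnapshot;
    import io.prometheus.metrics.model.snapshots.Labels;
    import io.prometheus.metrics.model.snapshots.MetricMetadata;
    import io.prometheus.metrics.model.snapshots.MetricSnapshot;
    import java.util.List;

    class AppendLabelSketch {
      public static void main(String[] args) {
        GaugeSnapshot gauge = new GaugeSnapshot(
            new MetricMetadata("broker_bytes_disk", "disk usage"),
            List.of(new GaugeSnapshot.GaugeDataPointSnapshot(42.0, Labels.of("dir", "/data"), null)));

        MetricSnapshot relabeled = MetricsUtils.appendLabel(gauge, "cluster", "local");
        // the data point now carries {cluster="local", dir="/data"}
        System.out.println(relabeled.getDataPoints().get(0).getLabels());
      }
    }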
diff --git a/api/src/main/java/io/kafbat/ui/util/ReactiveFailover.java b/api/src/main/java/io/kafbat/ui/util/ReactiveFailover.java
index 872e9ddf9..81efd63fd 100644
--- a/api/src/main/java/io/kafbat/ui/util/ReactiveFailover.java
+++ b/api/src/main/java/io/kafbat/ui/util/ReactiveFailover.java
@@ -81,9 +81,6 @@ private <V> Mono<V> mono(Function<T, Mono<V>> f, List<PublisherHolder<T>> candid
         .flatMap(f)
         .onErrorResume(failoverExceptionsPredicate, th -> {
           publisher.markFailed();
-          if (candidates.size() == 1) {
-            return Mono.error(th);
-          }
           var newCandidates = candidates.stream().skip(1).filter(PublisherHolder::isActive).toList();
           if (newCandidates.isEmpty()) {
             return Mono.error(th);
@@ -106,9 +103,6 @@ private <V> Flux<V> flux(Function<T, Flux<V>> f, List<PublisherHolder<T>> candid
         .flatMapMany(f)
         .onErrorResume(failoverExceptionsPredicate, th -> {
           publisher.markFailed();
-          if (candidates.size() == 1) {
-            return Flux.error(th);
-          }
           var newCandidates = candidates.stream().skip(1).filter(PublisherHolder::isActive).toList();
           if (newCandidates.isEmpty()) {
             return Flux.error(th);
diff --git a/api/src/main/java/io/kafbat/ui/util/jsonschema/JsonAvroConversion.java b/api/src/main/java/io/kafbat/ui/util/jsonschema/JsonAvroConversion.java
index 76baf0072..4df8a637e 100644
--- a/api/src/main/java/io/kafbat/ui/util/jsonschema/JsonAvroConversion.java
+++ b/api/src/main/java/io/kafbat/ui/util/jsonschema/JsonAvroConversion.java
@@ -75,7 +75,7 @@ private static Object convert(JsonNode node, Schema avroSchema) {
         assertJsonType(node, JsonNodeType.OBJECT);
         var map = new LinkedHashMap<String, Object>();
         var valueSchema = avroSchema.getValueType();
-        node.fields().forEachRemaining(f -> map.put(f.getKey(), convert(f.getValue(), valueSchema)));
+        node.properties().forEach(f -> map.put(f.getKey(), convert(f.getValue(), valueSchema)));
         yield map;
       }
       case ARRAY -> {
@@ -101,7 +101,7 @@ private static Object convert(JsonNode node, Schema avroSchema) {
         }

         assertJsonType(node, JsonNodeType.OBJECT);
-        var elements = Lists.newArrayList(node.fields());
+        var elements = Lists.newArrayList(node.properties());
         if (elements.size() != 1) {
           throw new JsonAvroConversionException(
               "UNION field value should be an object with single field == type name");
diff --git a/api/src/test/java/io/kafbat/ui/KafkaConsumerGroupTests.java b/api/src/test/java/io/kafbat/ui/KafkaConsumerGroupTests.java
index c23ea5fb0..89b3000aa 100644
--- a/api/src/test/java/io/kafbat/ui/KafkaConsumerGroupTests.java
+++ b/api/src/test/java/io/kafbat/ui/KafkaConsumerGroupTests.java
@@ -215,7 +215,7 @@ private Closeable startConsumerGroups(int count, String consumerGroupPrefix) {
     String topicName = createTopicWithRandomName();
     var consumers = Stream.generate(() -> {
-      String groupId = consumerGroupPrefix + RandomStringUtils.randomAlphabetic(5);
+      String groupId = consumerGroupPrefix + RandomStringUtils.secure().nextAlphabetic(5);
       val consumer = createTestConsumerWithGroupId(groupId);
       consumer.subscribe(List.of(topicName));
       consumer.poll(Duration.ofMillis(100));
diff --git a/api/src/test/java/io/kafbat/ui/container/PrometheusContainer.java b/api/src/test/java/io/kafbat/ui/container/PrometheusContainer.java
new file mode 100644
index 000000000..6df330789
--- /dev/null
+++ b/api/src/test/java/io/kafbat/ui/container/PrometheusContainer.java
@@ -0,0 +1,19 @@
+package io.kafbat.ui.container;
+
+import org.testcontainers.containers.GenericContainer;
+
+public class PrometheusContainer extends GenericContainer<PrometheusContainer> {
+
+  public PrometheusContainer() {
+    super("prom/prometheus:latest");
+    setCommandParts(new String[] {
+        "--web.enable-remote-write-receiver",
+        "--config.file=/etc/prometheus/prometheus.yml"
+    });
+    addExposedPort(9090);
+  }
+
+  public String url() {
+    return "http://" + getHost() + ":" + getMappedPort(9090);
+  }
+}
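Note: with --web.enable-remote-write-receiver the container accepts samples over Prometheus remote-write, so tests can both push metrics and query them back through the mapped port. A typical wiring sketch (hypothetical test body):

    // PrometheusContainer is AutoCloseable via Testcontainers' Startable
    try (PrometheusContainer prometheus = new PrometheusContainer()) {
      prometheus.start();
      String baseUrl = prometheus.url(); // e.g. http://localhost:49153
      // point remote-write or scrape clients at baseUrl here
    }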
diff --git a/api/src/test/java/io/kafbat/ui/emitter/MessageFiltersTest.java b/api/src/test/java/io/kafbat/ui/emitter/MessageFiltersTest.java
index 33414ef05..c9aa0bdfa 100644
--- a/api/src/test/java/io/kafbat/ui/emitter/MessageFiltersTest.java
+++ b/api/src/test/java/io/kafbat/ui/emitter/MessageFiltersTest.java
@@ -183,8 +183,8 @@ void filterSpeedIsAtLeast5kPerSec() {
     List<TopicMessageDTO> toFilter = new ArrayList<>();
     for (int i = 0; i < 5_000; i++) {
-      String name = i % 2 == 0 ? "user1" : RandomStringUtils.randomAlphabetic(10);
-      String randString = RandomStringUtils.randomAlphabetic(30);
+      String name = i % 2 == 0 ? "user1" : RandomStringUtils.secure().nextAlphabetic(10);
+      String randString = RandomStringUtils.secure().nextAlphabetic(30);
       String jsonContent = String.format(
           "{ \"name\" : { \"randomStr\": \"%s\", \"first\" : \"%s\"} }",
           randString, name);
diff --git a/api/src/test/java/io/kafbat/ui/model/PartitionDistributionStatsTest.java b/api/src/test/java/io/kafbat/ui/model/PartitionDistributionStatsTest.java
index 6c5086061..f168a7c12 100644
--- a/api/src/test/java/io/kafbat/ui/model/PartitionDistributionStatsTest.java
+++ b/api/src/test/java/io/kafbat/ui/model/PartitionDistributionStatsTest.java
@@ -23,28 +23,23 @@ void skewCalculatedBasedOnPartitionsCounts() {
     Node n4 = new Node(4, "n4", 9092);

     var stats = PartitionDistributionStats.create(
-        Statistics.builder()
-            .clusterDescription(
-                new ReactiveAdminClient.ClusterDescription(null, "test", Set.of(n1, n2, n3), null))
-            .topicDescriptions(
-                Map.of(
-                    "t1", new TopicDescription(
-                        "t1", false,
-                        List.of(
-                            new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
-                            new TopicPartitionInfo(1, n2, List.of(n2, n3), List.of(n2, n3))
-                        )
-                    ),
-                    "t2", new TopicDescription(
-                        "t2", false,
-                        List.of(
-                            new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
-                            new TopicPartitionInfo(1, null, List.of(n2, n1), List.of(n1))
-                        )
-                    )
+        List.of(
+            new TopicDescription(
+                "t1", false,
+                List.of(
+                    new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
+                    new TopicPartitionInfo(1, n2, List.of(n2, n3), List.of(n2, n3))
+                )
+            ),
+            new TopicDescription(
+                "t2", false,
+                List.of(
+                    new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
+                    new TopicPartitionInfo(1, null, List.of(n2, n1), List.of(n1))
                 )
             )
-            .build(), 4
+        ),
+        4
     );

     assertThat(stats.getPartitionLeaders())
diff --git a/api/src/test/java/io/kafbat/ui/service/StatisticsServiceTest.java b/api/src/test/java/io/kafbat/ui/service/StatisticsServiceTest.java
new file mode 100644
index 000000000..9005ba2c3
--- /dev/null
+++ b/api/src/test/java/io/kafbat/ui/service/StatisticsServiceTest.java
@@ -0,0 +1,78 @@
+package io.kafbat.ui.service;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import io.kafbat.ui.AbstractIntegrationTest;
+import io.kafbat.ui.model.CreateTopicMessageDTO;
+import io.kafbat.ui.model.Statistics;
+import io.kafbat.ui.service.metrics.scrape.inferred.InferredMetrics;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot;
+import io.prometheus.metrics.model.snapshots.Labels;
+import java.util.List;
+import java.util.UUID;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+
+class StatisticsServiceTest extends AbstractIntegrationTest {
+
+  @Autowired
+  private MessagesService messagesService;
+
+  @Autowired
+  private ClustersStorage clustersStorage;
+
+  @Autowired
+  private StatisticsService statisticsService;
+
+  @Test
+  void testInferredMetricsCollected() {
+    var newTopicName = "inferred_metrics_" + UUID.randomUUID();
+    createTopic(new
NewTopic(newTopicName, 2, (short) 1));
+    for (int i = 0; i < 4; i++) {
+      messagesService.sendMessage(
+          clustersStorage.getClusterByName(LOCAL).get(),
+          newTopicName,
+          new CreateTopicMessageDTO()
+              .key(UUID.randomUUID().toString())
+              .value(UUID.randomUUID().toString())
+              .partition(0)
+              .keySerde("String")
+              .valueSerde("String")
+      ).block();
+    }
+
+    Statistics updated =
+        statisticsService.updateCache(clustersStorage.getClusterByName(LOCAL).get())
+            .block();
+
+    var kafkaTopicPartitionsGauge = getGaugeSnapshot(
+        updated.getMetrics().getInferredMetrics(),
+        "kafka_topic_partitions",
+        Labels.of("topic", newTopicName)
+    );
+    assertThat(kafkaTopicPartitionsGauge.getValue())
+        .isEqualTo(2);
+
+    var kafkaTopicPartitionNextOffset = getGaugeSnapshot(
+        updated.getMetrics().getInferredMetrics(),
+        "kafka_topic_partition_next_offset",
+        Labels.of("topic", newTopicName, "partition", "0")
+    );
+    assertThat(kafkaTopicPartitionNextOffset.getValue())
+        .isEqualTo(4);
+  }
+
+  @SuppressWarnings("unchecked")
+  private GaugeDataPointSnapshot getGaugeSnapshot(InferredMetrics inferredMetrics,
+                                                  String metricName,
+                                                  Labels labels) {
+    return inferredMetrics.asStream()
+        .filter(s -> s.getMetadata().getName().equals(metricName) && s instanceof GaugeSnapshot)
+        .flatMap(s -> ((List<GaugeDataPointSnapshot>) s.getDataPoints()).stream())
+        .filter(dp -> dp.getLabels().equals(labels))
+        .findFirst()
+        .orElseThrow();
+  }
+}
diff --git a/api/src/test/java/io/kafbat/ui/service/TopicsServicePaginationTest.java b/api/src/test/java/io/kafbat/ui/service/TopicsServicePaginationTest.java
index 08b211b40..3fb1a804f 100644
--- a/api/src/test/java/io/kafbat/ui/service/TopicsServicePaginationTest.java
+++ b/api/src/test/java/io/kafbat/ui/service/TopicsServicePaginationTest.java
@@ -72,7 +72,7 @@ void shouldListFirst25Topics() {
         .map(Objects::toString)
         .map(name -> new TopicDescription(name, false, List.of()))
         .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-            Metrics.empty(), InternalLogDirStats.empty(), "_"))
+            Metrics.empty(), null, null, "_"))
         .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
@@ -98,7 +98,7 @@ void shouldListFirst25TopicsSortedByNameDescendingOrder() {
         .map(Objects::toString)
         .map(name -> new TopicDescription(name, false, List.of()))
         .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-            Metrics.empty(), InternalLogDirStats.empty(), "_"))
+            Metrics.empty(), null, null, "_"))
         .collect(Collectors.toMap(InternalTopic::getName, Function.identity()));
     init(internalTopics);
@@ -125,7 +125,7 @@ void shouldCalculateCorrectPageCountForNonDivisiblePageSize() {
         .map(Objects::toString)
         .map(name -> new TopicDescription(name, false, List.of()))
         .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-            Metrics.empty(), InternalLogDirStats.empty(), "_"))
+            Metrics.empty(), null, null, "_"))
        .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
@@ -144,7 +144,7 @@ void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() {
         .map(Objects::toString)
         .map(name -> new TopicDescription(name, false, List.of()))
         .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-            Metrics.empty(), InternalLogDirStats.empty(), "_"))
+            Metrics.empty(), null, null, "_"))
         .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
@@ -163,7 +163,7 @@ void shouldListBotInternalAndNonInternalTopics() {
         .map(Objects::toString)
         .map(name -> new TopicDescription(name, Integer.parseInt(name) % 10 == 0,
List.of())) .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null, - Metrics.empty(), InternalLogDirStats.empty(), "_")) + Metrics.empty(), null, null, "_")) .collect(Collectors.toMap(InternalTopic::getName, Function.identity())) ); @@ -184,7 +184,7 @@ void shouldListOnlyNonInternalTopics() { .map(Objects::toString) .map(name -> new TopicDescription(name, Integer.parseInt(name) % 5 == 0, List.of())) .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null, - Metrics.empty(), InternalLogDirStats.empty(), "_")) + Metrics.empty(), null, null, "_")) .collect(Collectors.toMap(InternalTopic::getName, Function.identity())) ); @@ -205,7 +205,7 @@ void shouldListOnlyTopicsContainingOne() { .map(Objects::toString) .map(name -> new TopicDescription(name, false, List.of())) .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null, - Metrics.empty(), InternalLogDirStats.empty(), "_")) + Metrics.empty(), null, null, "_")) .collect(Collectors.toMap(InternalTopic::getName, Function.identity())) ); @@ -227,7 +227,7 @@ void shouldListTopicsOrderedByPartitionsCount() { new TopicPartitionInfo(p, null, List.of(), List.of())) .collect(Collectors.toList()))) .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), InternalPartitionsOffsets.empty(), - Metrics.empty(), InternalLogDirStats.empty(), "_")) + Metrics.empty(), null, null, "_")) .collect(Collectors.toMap(InternalTopic::getName, Function.identity())); init(internalTopics); diff --git a/api/src/test/java/io/kafbat/ui/service/analyze/TopicAnalysisServiceTest.java b/api/src/test/java/io/kafbat/ui/service/analyze/TopicAnalysisServiceTest.java index 1c32b4215..93ed7a8e6 100644 --- a/api/src/test/java/io/kafbat/ui/service/analyze/TopicAnalysisServiceTest.java +++ b/api/src/test/java/io/kafbat/ui/service/analyze/TopicAnalysisServiceTest.java @@ -50,8 +50,8 @@ private void fillTopic(String topic, int cnt) { producer.send( new ProducerRecord<>( topic, - RandomStringUtils.randomAlphabetic(5), - RandomStringUtils.randomAlphabetic(10))); + RandomStringUtils.secure().nextAlphabetic(5), + RandomStringUtils.secure().nextAlphabetic(10))); } } } diff --git a/api/src/test/java/io/kafbat/ui/service/integration/odd/TopicsExporterTest.java b/api/src/test/java/io/kafbat/ui/service/integration/odd/TopicsExporterTest.java index 6bf6887a7..c34ff742e 100644 --- a/api/src/test/java/io/kafbat/ui/service/integration/odd/TopicsExporterTest.java +++ b/api/src/test/java/io/kafbat/ui/service/integration/odd/TopicsExporterTest.java @@ -1,5 +1,6 @@ package io.kafbat.ui.service.integration.odd; +import static io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.empty; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; @@ -9,6 +10,8 @@ import io.kafbat.ui.model.KafkaCluster; import io.kafbat.ui.model.Statistics; import io.kafbat.ui.service.StatisticsCache; +import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState; +import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.TopicState; import io.kafbat.ui.sr.api.KafkaSrClientApi; import io.kafbat.ui.sr.model.SchemaSubject; import io.kafbat.ui.sr.model.SchemaType; @@ -59,15 +62,22 @@ void doesNotExportTopicsWhichDontFitFiltrationRule() { .thenReturn(Mono.error(WebClientResponseException.create(404, "NF", new HttpHeaders(), null, null, null))); stats = Statistics.empty() .toBuilder() - .topicDescriptions( - Map.of( - "_hidden", new 
TopicDescription("_hidden", false, List.of( - new TopicPartitionInfo(0, null, List.of(), List.of()) - )), - "visible", new TopicDescription("visible", false, List.of( - new TopicPartitionInfo(0, null, List.of(), List.of()) - )) - ) + .clusterState( + empty().toBuilder().topicStates( + Map.of( + "_hidden", + new TopicState( + "_hidden", + new TopicDescription("_hidden", false, List.of( + new TopicPartitionInfo(0, null, List.of(), List.of()) + )), null, null, null, null, null), + "visible", + new TopicState("visible", + new TopicDescription("visible", false, List.of( + new TopicPartitionInfo(0, null, List.of(), List.of()) + )), null, null, null, null, null) + ) + ).build() ) .build(); @@ -101,41 +111,44 @@ void doesExportTopicData() { stats = Statistics.empty() .toBuilder() - .topicDescriptions( - Map.of( - "testTopic", - new TopicDescription( - "testTopic", - false, - List.of( - new TopicPartitionInfo( - 0, - null, - List.of( - new Node(1, "host1", 9092), - new Node(2, "host2", 9092) + .clusterState( + ScrapedClusterState.empty().toBuilder() + .topicStates( + Map.of( + "testTopic", + new TopicState( + "testTopic", + new TopicDescription( + "testTopic", + false, + List.of( + new TopicPartitionInfo( + 0, + null, + List.of( + new Node(1, "host1", 9092), + new Node(2, "host2", 9092) + ), + List.of() + ) + ) ), - List.of()) - )) - ) - ) - .topicConfigs( - Map.of( - "testTopic", List.of( - new ConfigEntry( - "custom.config", - "100500", - ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, - false, - false, - List.of(), - ConfigEntry.ConfigType.INT, - null + List.of( + new ConfigEntry( + "custom.config", + "100500", + ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, + false, + false, + List.of(), + ConfigEntry.ConfigType.INT, + null + ) + ), null, null, null, null + ) ) - ) - ) - ) - .build(); + ).build() + ).build(); StepVerifier.create(topicsExporter.export(cluster)) .assertNext(entityList -> { diff --git a/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java b/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java index 90e549662..f50a7b6dd 100644 --- a/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java +++ b/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java @@ -10,12 +10,14 @@ import io.kafbat.ui.AbstractIntegrationTest; import java.time.Duration; import java.util.Map; +import org.junit.Ignore; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.testcontainers.shaded.org.awaitility.Awaitility; import reactor.test.StepVerifier; +@Ignore class KsqlApiClientTest extends AbstractIntegrationTest { @BeforeAll diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java index 577d8aa6b..71a04ac68 100644 --- a/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java +++ b/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java @@ -2,6 +2,7 @@ import static org.assertj.core.api.Assertions.assertThat; +import io.kafbat.ui.service.metrics.scrape.jmx.JmxMetricsFormatter; import java.math.BigDecimal; import java.util.List; import java.util.Map; diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/MetricsUtils.java b/api/src/test/java/io/kafbat/ui/service/metrics/MetricsUtils.java new file mode 100644 index 000000000..53261c4af --- /dev/null +++ b/api/src/test/java/io/kafbat/ui/service/metrics/MetricsUtils.java @@ -0,0 +1,71 @@ 
diff --git a/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java b/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java
index 90e549662..f50a7b6dd 100644
--- a/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java
+++ b/api/src/test/java/io/kafbat/ui/service/ksql/KsqlApiClientTest.java
@@ -10,12 +10,14 @@
 import io.kafbat.ui.AbstractIntegrationTest;
 import java.time.Duration;
 import java.util.Map;
+import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.testcontainers.shaded.org.awaitility.Awaitility;
 import reactor.test.StepVerifier;

+@Disabled
 class KsqlApiClientTest extends AbstractIntegrationTest {

   @BeforeAll
diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java
index 577d8aa6b..71a04ac68 100644
--- a/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java
+++ b/api/src/test/java/io/kafbat/ui/service/metrics/JmxMetricsFormatterTest.java
@@ -2,6 +2,7 @@

 import static org.assertj.core.api.Assertions.assertThat;

+import io.kafbat.ui.service.metrics.scrape.jmx.JmxMetricsFormatter;
 import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/MetricsUtils.java b/api/src/test/java/io/kafbat/ui/service/metrics/MetricsUtils.java
new file mode 100644
index 000000000..53261c4af
--- /dev/null
+++ b/api/src/test/java/io/kafbat/ui/service/metrics/MetricsUtils.java
@@ -0,0 +1,71 @@
+package io.kafbat.ui.service.metrics;
+
+import io.prometheus.metrics.model.snapshots.CounterSnapshot;
+import io.prometheus.metrics.model.snapshots.DataPointSnapshot;
+import io.prometheus.metrics.model.snapshots.GaugeSnapshot;
+import io.prometheus.metrics.model.snapshots.Labels;
+import io.prometheus.metrics.model.snapshots.MetricMetadata;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.Map;
+import java.util.Optional;
+
+public class MetricsUtils {
+
+  private MetricsUtils() {
+  }
+
+  private static boolean areMetadataEqual(MetricMetadata metadata1, MetricMetadata metadata2) {
+    return metadata1.getName().equals(metadata2.getName())
+        && metadata1.getHelp().equals(metadata2.getHelp())
+        && Optional.ofNullable(metadata1.getUnit())
+            .map(u -> u.equals(metadata2.getUnit()))
+            .orElse(metadata2.getUnit() == null);
+  }
+
+  public static boolean isTheSameMetric(MetricSnapshot m1, MetricSnapshot m2) {
+    if (!m1.getClass().equals(m2.getClass())) {
+      return false;
+    }
+    MetricMetadata metadata1 = m1.getMetadata();
+    MetricMetadata metadata2 = m2.getMetadata();
+    if (!areMetadataEqual(metadata1, metadata2)) {
+      return false;
+    }
+    var dataPoints1 = m1.getDataPoints();
+    var dataPoints2 = m2.getDataPoints();
+    if (dataPoints1.size() != dataPoints2.size()) {
+      return false;
+    }
+    for (int i = 0; i < dataPoints1.size(); i++) {
+      if (!isTheSameDataPoint(dataPoints1.get(i), dataPoints2.get(i))) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  private static boolean areLabelsEqual(Labels labels1, Labels labels2) {
+    return Optional.ofNullable(labels1)
+        .map(l -> l.equals(labels2))
+        .orElse(labels2 == null);
+  }
+
+  public static boolean isTheSameDataPoint(DataPointSnapshot dp1, DataPointSnapshot dp2) {
+    if (!dp1.getClass().equals(dp2.getClass())) {
+      return false;
+    }
+    if (!areLabelsEqual(dp1.getLabels(), dp2.getLabels())) {
+      return false;
+    }
+    if (dp1 instanceof GaugeSnapshot.GaugeDataPointSnapshot gauge1) {
+      var gauge2 = (GaugeSnapshot.GaugeDataPointSnapshot) dp2;
+      return Double.compare(gauge1.getValue(), gauge2.getValue()) == 0;
+    }
+    if (dp1 instanceof CounterSnapshot.CounterDataPointSnapshot counter1) {
+      var counter2 = (CounterSnapshot.CounterDataPointSnapshot) dp2;
+      return Double.compare(counter1.getValue(), counter2.getValue()) == 0;
+    }
+    return true;
+  }
+
+}
diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/PrometheusEndpointMetricsParserTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/PrometheusEndpointMetricsParserTest.java
deleted file mode 100644
index 88636d019..000000000
--- a/api/src/test/java/io/kafbat/ui/service/metrics/PrometheusEndpointMetricsParserTest.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package io.kafbat.ui.service.metrics;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import java.util.Map;
-import java.util.Optional;
-import org.assertj.core.api.Assertions;
-import org.junit.jupiter.api.Test;
-
-class PrometheusEndpointMetricsParserTest {
-
-  @Test
-  void test() {
-    String metricsString =
-        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate"
-            + "{name=\"BytesOutPerSec\",topic=\"__confluent.support.metrics\",} 123.1234";
-
-    Optional<RawMetric> parsedOpt = PrometheusEndpointMetricsParser.parse(metricsString);
-
-    Assertions.assertThat(parsedOpt).hasValueSatisfying(metric -> {
-      assertThat(metric.name()).isEqualTo("kafka_server_BrokerTopicMetrics_FifteenMinuteRate");
-      assertThat(metric.value()).isEqualTo("123.1234");
-      assertThat(metric.labels()).containsExactlyEntriesOf(
-          Map.of(
-              "name",
"BytesOutPerSec", - "topic", "__confluent.support.metrics" - )); - }); - } - -} diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/PrometheusMetricsRetrieverTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/PrometheusMetricsRetrieverTest.java deleted file mode 100644 index a4c63e391..000000000 --- a/api/src/test/java/io/kafbat/ui/service/metrics/PrometheusMetricsRetrieverTest.java +++ /dev/null @@ -1,97 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import io.kafbat.ui.model.MetricsConfig; -import java.io.IOException; -import java.math.BigDecimal; -import java.util.List; -import java.util.Map; -import okhttp3.mockwebserver.MockResponse; -import okhttp3.mockwebserver.MockWebServer; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.springframework.web.reactive.function.client.WebClient; -import reactor.test.StepVerifier; - -class PrometheusMetricsRetrieverTest { - - private final PrometheusMetricsRetriever retriever = new PrometheusMetricsRetriever(); - - private final MockWebServer mockWebServer = new MockWebServer(); - - @BeforeEach - void startMockServer() throws IOException { - mockWebServer.start(); - } - - @AfterEach - void stopMockServer() throws IOException { - mockWebServer.close(); - } - - @Test - void callsMetricsEndpointAndConvertsResponceToRawMetric() { - var url = mockWebServer.url("/metrics"); - mockWebServer.enqueue(prepareResponse()); - - MetricsConfig metricsConfig = prepareMetricsConfig(url.port(), null, null); - - StepVerifier.create(retriever.retrieve(WebClient.create(), url.host(), metricsConfig)) - .expectNextSequence(expectedRawMetrics()) - // third metric should not be present, since it has "NaN" value - .verifyComplete(); - } - - @Test - void callsSecureMetricsEndpointAndConvertsResponceToRawMetric() { - var url = mockWebServer.url("/metrics"); - mockWebServer.enqueue(prepareResponse()); - - - MetricsConfig metricsConfig = prepareMetricsConfig(url.port(), "username", "password"); - - StepVerifier.create(retriever.retrieve(WebClient.create(), url.host(), metricsConfig)) - .expectNextSequence(expectedRawMetrics()) - // third metric should not be present, since it has "NaN" value - .verifyComplete(); - } - - MockResponse prepareResponse() { - // body copied from real jmx exporter - return new MockResponse().setBody( - "# HELP kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate Attribute exposed for management \n" - + "# TYPE kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate untyped\n" - + "kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate{name=\"RequestHandlerAvgIdlePercent\",} 0.898\n" - + "# HELP kafka_server_socket_server_metrics_request_size_avg The average size of requests sent. 
\n" - + "# TYPE kafka_server_socket_server_metrics_request_size_avg untyped\n" - + "kafka_server_socket_server_metrics_request_size_avg{listener=\"PLAIN\",networkProcessor=\"1\",} 101.1\n" - + "kafka_server_socket_server_metrics_request_size_avg{listener=\"PLAIN2\",networkProcessor=\"5\",} NaN" - ); - } - - MetricsConfig prepareMetricsConfig(Integer port, String username, String password) { - return MetricsConfig.builder() - .ssl(false) - .port(port) - .type(MetricsConfig.PROMETHEUS_METRICS_TYPE) - .username(username) - .password(password) - .build(); - } - - List expectedRawMetrics() { - - var firstMetric = RawMetric.create( - "kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate", - Map.of("name", "RequestHandlerAvgIdlePercent"), - new BigDecimal("0.898") - ); - - var secondMetric = RawMetric.create( - "kafka_server_socket_server_metrics_request_size_avg", - Map.of("listener", "PLAIN", "networkProcessor", "1"), - new BigDecimal("101.1") - ); - return List.of(firstMetric, secondMetric); - } -} diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/WellKnownMetricsTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/WellKnownMetricsTest.java deleted file mode 100644 index 777db86cb..000000000 --- a/api/src/test/java/io/kafbat/ui/service/metrics/WellKnownMetricsTest.java +++ /dev/null @@ -1,93 +0,0 @@ -package io.kafbat.ui.service.metrics; - -import static org.assertj.core.api.Assertions.assertThat; - -import io.kafbat.ui.model.Metrics; -import java.math.BigDecimal; -import java.util.Arrays; -import java.util.Map; -import java.util.Optional; -import org.apache.kafka.common.Node; -import org.junit.jupiter.api.Test; - -class WellKnownMetricsTest { - - private final WellKnownMetrics wellKnownMetrics = new WellKnownMetrics(); - - @Test - void bytesIoTopicMetricsPopulated() { - populateWith( - new Node(0, "host", 123), - "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",topic=\"test-topic\",} 1.0", - "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",topic=\"test-topic\",} 2.0", - "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test-topic\",} 1.0", - "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test-topic\",} 2.0", - "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test-topic\",} 1.0", - "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test-topic\",} 2.0" - ); - assertThat(wellKnownMetrics.bytesInFifteenMinuteRate) - .containsEntry("test-topic", new BigDecimal("3.0")); - assertThat(wellKnownMetrics.bytesOutFifteenMinuteRate) - .containsEntry("test-topic", new BigDecimal("6.0")); - } - - @Test - void bytesIoBrokerMetricsPopulated() { - populateWith( - new Node(1, "host1", 123), - "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",} 1.0", - "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",} 2.0" - ); - populateWith( - new Node(2, "host2", 345), - "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",} 10.0", - "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",} 20.0" - ); - - assertThat(wellKnownMetrics.brokerBytesInFifteenMinuteRate) - .hasSize(2) - .containsEntry(1, new BigDecimal("1.0")) - .containsEntry(2, new BigDecimal("10.0")); - - assertThat(wellKnownMetrics.brokerBytesOutFifteenMinuteRate) - .hasSize(2) - .containsEntry(1, new BigDecimal("2.0")) - .containsEntry(2, new 
BigDecimal("20.0")); - } - - @Test - void appliesInnerStateToMetricsBuilder() { - //filling per topic io rates - wellKnownMetrics.bytesInFifteenMinuteRate.put("topic", new BigDecimal(1)); - wellKnownMetrics.bytesOutFifteenMinuteRate.put("topic", new BigDecimal(2)); - - //filling per broker io rates - wellKnownMetrics.brokerBytesInFifteenMinuteRate.put(1, new BigDecimal(1)); - wellKnownMetrics.brokerBytesOutFifteenMinuteRate.put(1, new BigDecimal(2)); - wellKnownMetrics.brokerBytesInFifteenMinuteRate.put(2, new BigDecimal(10)); - wellKnownMetrics.brokerBytesOutFifteenMinuteRate.put(2, new BigDecimal(20)); - - Metrics.MetricsBuilder builder = Metrics.builder(); - wellKnownMetrics.apply(builder); - var metrics = builder.build(); - - // checking per topic io rates - assertThat(metrics.getTopicBytesInPerSec()).containsExactlyEntriesOf(wellKnownMetrics.bytesInFifteenMinuteRate); - assertThat(metrics.getTopicBytesOutPerSec()).containsExactlyEntriesOf(wellKnownMetrics.bytesOutFifteenMinuteRate); - - // checking per broker io rates - assertThat(metrics.getBrokerBytesInPerSec()).containsExactlyInAnyOrderEntriesOf( - Map.of(1, new BigDecimal(1), 2, new BigDecimal(10))); - assertThat(metrics.getBrokerBytesOutPerSec()).containsExactlyInAnyOrderEntriesOf( - Map.of(1, new BigDecimal(2), 2, new BigDecimal(20))); - } - - private void populateWith(Node n, String... prometheusMetric) { - Arrays.stream(prometheusMetric) - .map(PrometheusEndpointMetricsParser::parse) - .filter(Optional::isPresent) - .map(Optional::get) - .forEach(m -> wellKnownMetrics.populate(n, m)); - } - -} diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/prometheus/PrometheusExposeTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/prometheus/PrometheusExposeTest.java new file mode 100644 index 000000000..2868e4765 --- /dev/null +++ b/api/src/test/java/io/kafbat/ui/service/metrics/prometheus/PrometheusExposeTest.java @@ -0,0 +1,67 @@ +package io.kafbat.ui.service.metrics.prometheus; + +import static io.kafbat.ui.service.metrics.MetricsUtils.isTheSameMetric; +import static io.kafbat.ui.service.metrics.prometheus.PrometheusMetricsExposer.prepareMetricsForGlobalExpose; +import static org.assertj.core.api.Assertions.assertThat; + +import io.kafbat.ui.model.Metrics; +import io.kafbat.ui.service.metrics.scrape.inferred.InferredMetrics; +import io.prometheus.metrics.model.snapshots.GaugeSnapshot; +import io.prometheus.metrics.model.snapshots.Labels; +import io.prometheus.metrics.model.snapshots.MetricMetadata; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.Test; + +class PrometheusExposeTest { + + @Test + void prepareMetricsForGlobalExposeAppendsClusterAndBrokerIdLabelsToMetrics() { + + var inferredMfs = new GaugeSnapshot(new MetricMetadata("infer", "help"), List.of( + new GaugeSnapshot.GaugeDataPointSnapshot(100, Labels.of("lbl1", "lblVal1"), null))); + + var broker1Mfs = new GaugeSnapshot(new MetricMetadata("brok", "help"), List.of( + new GaugeSnapshot.GaugeDataPointSnapshot(101, Labels.of("broklbl1", "broklblVal1"), null))); + + var broker2Mfs = new GaugeSnapshot(new MetricMetadata("brok", "help"), List.of( + new GaugeSnapshot.GaugeDataPointSnapshot(102, Labels.of("broklbl1", "broklblVal1"), null))); + + List prepared = prepareMetricsForGlobalExpose( + "testCluster", + Metrics.builder() + .inferredMetrics(new InferredMetrics(List.of(inferredMfs))) + .perBrokerScrapedMetrics(Map.of(1, List.of(broker1Mfs), 2, List.of(broker2Mfs))) + 
.build() + ).toList(); + + assertThat(prepared) + .hasSize(3) + .anyMatch(p -> isTheSameMetric(p, new GaugeSnapshot(new MetricMetadata("infer", "help"), List.of( + new GaugeSnapshot.GaugeDataPointSnapshot(100, + Labels.of("cluster", "testCluster", "lbl1", "lblVal1"), null + )) + ))) + .anyMatch(p -> isTheSameMetric(p, new GaugeSnapshot(new MetricMetadata("brok", "help"), List.of( + new GaugeSnapshot.GaugeDataPointSnapshot(101, + Labels.of( + "cluster", "testCluster", + "broker_id", "1", + "broklbl1", "broklblVal1" + ), null + )) + ))) + .anyMatch(p -> isTheSameMetric(p, new GaugeSnapshot(new MetricMetadata("brok", "help"), List.of( + new GaugeSnapshot.GaugeDataPointSnapshot(102, + Labels.of( + "cluster", "testCluster", + "broker_id", "2", + "broklbl1", "broklblVal1" + ), null + )) + ) + )); + } + +} diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/scrape/IoRatesMetricsScannerTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/IoRatesMetricsScannerTest.java new file mode 100644 index 000000000..94257bc38 --- /dev/null +++ b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/IoRatesMetricsScannerTest.java @@ -0,0 +1,75 @@ +package io.kafbat.ui.service.metrics.scrape; + +import static java.util.Arrays.stream; +import static java.util.stream.Collectors.toMap; +import static org.assertj.core.api.Assertions.assertThat; + +import io.kafbat.ui.service.metrics.scrape.prometheus.PrometheusTextFormatParser; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; +import java.math.BigDecimal; +import java.util.List; +import java.util.Map; +import org.apache.kafka.common.Node; +import org.junit.jupiter.api.Test; + +class IoRatesMetricsScannerTest { + + private IoRatesMetricsScanner ioRatesMetricsScanner; + + @Test + void bytesIoTopicMetricsPopulated() { + populateWith( + nodeMetrics( + new Node(0, "host", 123), + "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",topic=\"test\",} 1.0", + "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",topic=\"test\",} 2.0", + "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test\",} 1.0", + "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test\",} 2.0", + "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test\",} 1.0", + "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test\",} 2.0" + ) + ); + assertThat(ioRatesMetricsScanner.bytesInFifteenMinuteRate) + .containsEntry("test", new BigDecimal("3.0")); + assertThat(ioRatesMetricsScanner.bytesOutFifteenMinuteRate) + .containsEntry("test", new BigDecimal("6.0")); + } + + @Test + void bytesIoBrokerMetricsPopulated() { + populateWith( + nodeMetrics( + new Node(1, "host1", 123), + "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",} 1.0", + "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",} 2.0" + ), + nodeMetrics( + new Node(2, "host2", 345), + "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",} 10.0", + "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",} 20.0" + ) + ); + + assertThat(ioRatesMetricsScanner.brokerBytesInFifteenMinuteRate) + .hasSize(2) + .containsEntry(1, new BigDecimal("1.0")) + .containsEntry(2, new BigDecimal("10.0")); + + assertThat(ioRatesMetricsScanner.brokerBytesOutFifteenMinuteRate) + .hasSize(2) + .containsEntry(1, new BigDecimal("2.0")) + .containsEntry(2, new 
BigDecimal("20.0")); + } + + @SafeVarargs + private void populateWith(Map.Entry>... entries) { + ioRatesMetricsScanner = new IoRatesMetricsScanner( + stream(entries).collect(toMap(Map.Entry::getKey, Map.Entry::getValue)) + ); + } + + private Map.Entry> nodeMetrics(Node n, String... prometheusMetrics) { + return Map.entry(n.id(), new PrometheusTextFormatParser().parse(String.join("\n", prometheusMetrics))); + } + +} diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetricsScraperTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetricsScraperTest.java new file mode 100644 index 000000000..1141aa4bb --- /dev/null +++ b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/inferred/InferredMetricsScraperTest.java @@ -0,0 +1,120 @@ +package io.kafbat.ui.service.metrics.scrape.inferred; + +import static io.kafbat.ui.model.InternalLogDirStats.LogDirSpaceStats; +import static io.kafbat.ui.model.InternalLogDirStats.SegmentStats; +import static io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.ConsumerGroupState; +import static io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.NodeState; +import static io.kafbat.ui.service.metrics.scrape.ScrapedClusterState.TopicState; +import static org.assertj.core.api.Assertions.assertThat; + +import io.kafbat.ui.service.metrics.scrape.ScrapedClusterState; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import org.apache.kafka.clients.admin.ConsumerGroupDescription; +import org.apache.kafka.clients.admin.MemberAssignment; +import org.apache.kafka.clients.admin.MemberDescription; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionInfo; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; + +class InferredMetricsScraperTest { + + final InferredMetricsScraper scraper = new InferredMetricsScraper(); + + @Test + void allExpectedMetricsScraped() { + var segmentStats = new SegmentStats(1234L, 3); + var logDirStats = new LogDirSpaceStats(234L, 345L, Map.of(), Map.of()); + + Node node1 = new Node(1, "node1", 9092); + Node node2 = new Node(2, "node2", 9092); + + Mono scraped = scraper.scrape( + ScrapedClusterState.builder() + .scrapeFinishedAt(Instant.now()) + .nodesStates( + Map.of( + 1, new NodeState(1, node1, segmentStats, logDirStats), + 2, new NodeState(2, node2, segmentStats, logDirStats) + ) + ) + .topicStates( + Map.of( + "t1", + new TopicState( + "t1", + new TopicDescription( + "t1", + false, + List.of( + new TopicPartitionInfo(0, node1, List.of(node1, node2), List.of(node1, node2)), + new TopicPartitionInfo(1, node1, List.of(node1, node2), List.of(node1)) + ) + ), + List.of(), + Map.of(0, 100L, 1, 101L), + Map.of(0, 200L, 1, 201L), + segmentStats, + Map.of(0, segmentStats, 1, segmentStats) + ) + ) + ) + .consumerGroupsStates( + Map.of( + "cg1", + new ConsumerGroupState( + "cg1", + new ConsumerGroupDescription( + "cg1", + true, + List.of( + new MemberDescription( + "memb1", Optional.empty(), "client1", "hst1", + new MemberAssignment(Set.of(new TopicPartition("t1", 0))) + ) + ), + null, + org.apache.kafka.common.ConsumerGroupState.STABLE, + node1 + ), + Map.of(new TopicPartition("t1", 0), 150L) + ) + ) + ) + .build() + ); + + StepVerifier.create(scraped) + .assertNext(inferredMetrics -> + 
assertThat(inferredMetrics.asStream().map(m -> m.getMetadata().getName())).containsExactlyInAnyOrder(
+                "broker_count",
+                "broker_bytes_disk",
+                "broker_bytes_usable",
+                "broker_bytes",
+                "topic_count",
+                "kafka_topic_partitions",
+                "kafka_topic_partition_next_offset",
+                "kafka_topic_partition_oldest_offset",
+                "kafka_topic_partition_in_sync_replica",
+                "kafka_topic_partition_replicas",
+                "kafka_topic_partition_leader",
+                "topic_bytes_disk",
+                "group_count",
+                "group_state",
+                "group_member_count",
+                "group_host_count",
+                "kafka_consumergroup_current_offset",
+                "kafka_consumergroup_lag"
+            )
+        )
+        .verifyComplete();
+  }
+
+}
diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetrieverTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetrieverTest.java
new file mode 100644
index 000000000..516ec2176
--- /dev/null
+++ b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetrieverTest.java
@@ -0,0 +1,39 @@
+package io.kafbat.ui.service.metrics.scrape.prometheus;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import io.kafbat.ui.container.PrometheusContainer;
+import io.kafbat.ui.model.MetricsScrapeProperties;
+import io.prometheus.metrics.model.snapshots.MetricSnapshot;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+import org.testcontainers.junit.jupiter.Container;
+import org.testcontainers.junit.jupiter.Testcontainers;
+
+@Testcontainers
+class PrometheusMetricsRetrieverTest {
+
+  @Container
+  private static final PrometheusContainer PROMETHEUS = new PrometheusContainer();
+
+  @Test
+  void testPrometheusMetricsParsedFromEndpoint() {
+    List<MetricSnapshot> retrieved = new PrometheusMetricsRetriever(
+        MetricsScrapeProperties.builder()
+            .port(PROMETHEUS.getMappedPort(9090))
+            .ssl(false)
+            .build()
+    ).retrieve(PROMETHEUS.getHost()).block();
+
+    assertThat(retrieved)
+        .map(m -> m.getMetadata().getName())
+        .containsAll(
+            List.of(
+                "go_gc_cycles_automatic_gc_cycles", //counter
+                "go_gc_duration_seconds", //histogram
+                "go_gc_gogc_percent" //gauge
+            )
+        );
+  }
+
+}
diff --git a/api/src/test/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusTextFormatParserTest.java b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusTextFormatParserTest.java
new file mode 100644
index 000000000..b4b7120e5
--- /dev/null
+++ b/api/src/test/java/io/kafbat/ui/service/metrics/scrape/prometheus/PrometheusTextFormatParserTest.java
@@ -0,0 +1,145 @@
+package io.kafbat.ui.service.metrics.scrape.prometheus;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.google.common.base.Charsets;
+import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter;
+import io.prometheus.metrics.model.snapshots.MetricSnapshots;
+import java.io.ByteArrayOutputStream;
+import lombok.SneakyThrows;
+import org.junit.jupiter.api.Test;
+
+class PrometheusTextFormatParserTest {
+
+  @Test
+  void testCounter() {
+    String source = """
+        # HELP kafka_network_requestmetrics_requests_total Total number of network requests
+        # TYPE kafka_network_requestmetrics_requests_total counter
+        kafka_network_requestmetrics_requests_total{request="FetchConsumer"} 138912.0
+        kafka_network_requestmetrics_requests_total{request="Metadata"} 21001.0
+        kafka_network_requestmetrics_requests_total{request="Produce"} 140321.0
+        """;
+    assertParseAndSerialize(source);
+  }
+
+  @Test
+  void testGauge() {
+    String source = """
+        # HELP
kafka_controller_kafkacontroller_activecontrollercount Number of active controllers + # TYPE kafka_controller_kafkacontroller_activecontrollercount gauge + kafka_controller_kafkacontroller_activecontrollercount 1.0 + """; + assertParseAndSerialize(source); + } + + @Test + void testHistogram() { + String source = """ + # HELP http_request_duration_seconds Request duration in seconds + # TYPE http_request_duration_seconds histogram + http_request_duration_seconds_bucket{method="GET",path="/hello",le="0.01"} 2 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="0.05"} 10 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="0.1"} 32 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="0.25"} 76 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="0.5"} 91 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="1.0"} 98 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="2.5"} 100 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="5.0"} 100 + http_request_duration_seconds_bucket{method="GET",path="/hello",le="+Inf"} 100 + http_request_duration_seconds_count{method="GET",path="/hello"} 100 + http_request_duration_seconds_sum{method="GET",path="/hello"} 22.57 + """; + assertParseAndSerialize(source); + } + + @Test + void testSummary() { + String source = """ + # HELP kafka_network_requestmetrics_queue_time_ms Total time spent in request queue + # TYPE kafka_network_requestmetrics_queue_time_ms summary + kafka_network_requestmetrics_queue_time_ms{request="FetchConsumer",quantile="0.5"} 1.23 + kafka_network_requestmetrics_queue_time_ms{request="FetchConsumer",quantile="0.95"} 5.34 + kafka_network_requestmetrics_queue_time_ms{request="FetchConsumer",quantile="0.99"} 9.12 + kafka_network_requestmetrics_queue_time_ms_count{request="FetchConsumer"} 138912 + kafka_network_requestmetrics_queue_time_ms_sum{request="FetchConsumer"} 37812.3 + """; + assertParseAndSerialize(source); + } + + @Test + void testUntyped() { + String source = """ + # some comment that should be skipped + kafka_server_some_untyped_metric{topic="orders"} 138922 + """; + String expected = """ + # TYPE kafka_server_some_untyped_metric untyped + kafka_server_some_untyped_metric{topic="orders"} 138922.0 + """; + assertParseAndSerialize(source, expected); + } + + @Test + @SuppressWarnings("checkstyle:LineLength") + void testVariousTypes() { + String source = """ + # HELP kafka_server_brokertopicmetrics_totalfetchrequests_total Total number of fetch requests + # TYPE kafka_server_brokertopicmetrics_totalfetchrequests_total counter + kafka_server_brokertopicmetrics_totalfetchrequests_total{topic="orders"} 138922.0 + + # some invalid comment here + kafka_server_some_untyped_metric{topic="orders"} 138922 + + # Minimalistic line: + metric_without_timestamp_and_labels 12.47 + + # HELP help_no_type Some metric with help, but no type + help_no_type{lbl="test1"} 1 + help_no_type{lbl="test2"} 2 + + # Escaping in label values: + msdos_file_access_time_seconds{path="C:\\\\DIR\\\\FILE.TXT",error="Cannot find file:\\n\\"FILE.TXT\\""} 1.458255915e9 + + # HELP kafka_controller_kafkacontroller_activecontrollercount Number of active controllers + # TYPE kafka_controller_kafkacontroller_activecontrollercount gauge + kafka_controller_kafkacontroller_activecontrollercount 1 + """; + + String expected = """ + # HELP help_no_type Some metric with help, but no type + # TYPE help_no_type untyped + help_no_type{lbl="test1"} 1.0 + 
help_no_type{lbl="test2"} 2.0 + # HELP kafka_controller_kafkacontroller_activecontrollercount Number of active controllers + # TYPE kafka_controller_kafkacontroller_activecontrollercount gauge + kafka_controller_kafkacontroller_activecontrollercount 1.0 + # HELP kafka_server_brokertopicmetrics_totalfetchrequests_total Total number of fetch requests + # TYPE kafka_server_brokertopicmetrics_totalfetchrequests_total counter + kafka_server_brokertopicmetrics_totalfetchrequests_total{topic="orders"} 138922.0 + # TYPE kafka_server_some_untyped_metric untyped + kafka_server_some_untyped_metric{topic="orders"} 138922.0 + # TYPE metric_without_timestamp_and_labels untyped + metric_without_timestamp_and_labels 12.47 + # TYPE msdos_file_access_time_seconds untyped + msdos_file_access_time_seconds{error="Cannot find file:\\n\\"FILE.TXT\\"",path="C:\\\\DIR\\\\FILE.TXT"} 1.458255915E9 + """; + + assertParseAndSerialize(source, expected); + } + + private void assertParseAndSerialize(String test) { + assertParseAndSerialize(test, test); + } + + @SneakyThrows + private void assertParseAndSerialize(String source, + String expectedSerialized) { + var parsedMetrics = new MetricSnapshots(new PrometheusTextFormatParser().parse(source)); + var baos = new ByteArrayOutputStream(); + new PrometheusTextFormatWriter(false) + .write(baos, parsedMetrics); + assertThat(baos.toString(Charsets.UTF_8)).isEqualTo(expectedSerialized); + } + +} diff --git a/api/src/test/java/io/kafbat/ui/util/ContentUtilsTest.java b/api/src/test/java/io/kafbat/ui/util/ContentUtilsTest.java index fa2926e3d..fecdb40e7 100644 --- a/api/src/test/java/io/kafbat/ui/util/ContentUtilsTest.java +++ b/api/src/test/java/io/kafbat/ui/util/ContentUtilsTest.java @@ -56,7 +56,7 @@ void testHeaderValueShort() { @Test void testHeaderValueLongStringUtf8() { - String testValue = RandomStringUtils.random(10000, true, false); + String testValue = RandomStringUtils.secure().next(10000, true, false); assertEquals(testValue, ContentUtils.convertToString(testValue.getBytes(StandardCharsets.UTF_8))); } diff --git a/contract/build.gradle b/contract/build.gradle index 5950265ac..5bb53ac9d 100644 --- a/contract/build.gradle +++ b/contract/build.gradle @@ -6,7 +6,6 @@ plugins { alias(libs.plugins.openapi.validator) } - def specDir = project.layout.projectDirectory.dir("src/main/resources/swagger/") def targetDir = project.layout.buildDirectory.dir("generated").get() @@ -111,6 +110,23 @@ tasks.register('generateSchemaRegistryClient', GenerateTask) { dateLibrary : "java8",] } +tasks.register('generatePrometheusClient', GenerateTask) { + generatorName = "java" + inputSpec = specDir.file("prometheus-query-api.yaml").asFile.absolutePath + outputDir = targetDir.dir("prometheus-api").asFile.absolutePath + generateApiTests = false + generateModelTests = false + apiPackage = "io.kafbat.ui.prometheus.api" + invokerPackage = "io.kafbat.ui.prometheus" + modelPackage = "io.kafbat.ui.prometheus.model" + + configOptions = [asyncNative : "true", + library : "webclient", + useJakartaEe : "true", + useBeanValidation: "true", + dateLibrary : "java8",] +} + sourceSets { main { java { @@ -118,6 +134,7 @@ sourceSets { srcDir targetDir.dir("kafka-connect-client/src/main/java") srcDir targetDir.dir("kafbat-ui-client/src/main/java") srcDir targetDir.dir("kafka-sr-client/src/main/java") + srcDir targetDir.dir("prometheus-api/src/main/java") } resources { @@ -126,5 +143,5 @@ sourceSets { } } -compileJava.dependsOn generateUiClient, generateBackendApi, generateConnectClient, 
generateSchemaRegistryClient -processResources.dependsOn generateUiClient, generateBackendApi, generateConnectClient, generateSchemaRegistryClient +compileJava.dependsOn generateUiClient, generateBackendApi, generateConnectClient, generateSchemaRegistryClient, generatePrometheusClient +processResources.dependsOn generateUiClient, generateBackendApi, generateConnectClient, generateSchemaRegistryClient, generatePrometheusClient diff --git a/contract/src/main/resources/swagger/kafbat-ui-api.yaml b/contract/src/main/resources/swagger/kafbat-ui-api.yaml index eabf136c2..d35e320cf 100644 --- a/contract/src/main/resources/swagger/kafbat-ui-api.yaml +++ b/contract/src/main/resources/swagger/kafbat-ui-api.yaml @@ -31,6 +31,52 @@ paths: items: $ref: '#/components/schemas/Cluster' + /api/clusters/{clusterName}/graphs/descriptions: + get: + tags: + - Graphs + summary: getGraphsList + operationId: getGraphsList + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + responses: + 200: + description: | + Success + content: + application/json: + schema: + $ref: '#/components/schemas/GraphDescriptions' + + /api/clusters/{clusterName}/graphs/prometheus: + post: + tags: + - Graphs + summary: getGraphData + operationId: getGraphData + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/GraphDataRequest' + responses: + 200: + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/PrometheusApiQueryResponse' + /api/clusters/{clusterName}/cache: post: tags: @@ -155,6 +201,40 @@ paths: schema: $ref: '#/components/schemas/ClusterMetrics' + /metrics: + get: + tags: + - PrometheusExpose + summary: exposeAllMetrics + operationId: exposeAllMetrics + responses: + 200: + description: OK + content: + application/text: + schema: + type: string + + /metrics/{clusterName}: + get: + tags: + - PrometheusExpose + summary: exposeClusterMetrics + operationId: exposeClusterMetrics + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + responses: + 200: + description: OK + content: + application/text: + schema: + type: string + /api/clusters/{clusterName}/stats: get: tags: @@ -2485,6 +2565,7 @@ components: - KAFKA_ACL_VIEW # get ACLs listing - KAFKA_ACL_EDIT # create & delete ACLs - CLIENT_QUOTA_MANAGEMENT + - GRAPHS_ENABLED required: - name - status @@ -4147,6 +4228,112 @@ components: location: type: string + GraphDataRequest: + type: object + properties: + id: + type: string + parameters: + type: object + additionalProperties: + type: string + from: + type: string + format: date-time + to: + type: string + format: date-time + + GraphDescriptions: + type: object + properties: + graphs: + type: array + items: + $ref: '#/components/schemas/GraphDescription' + + GraphDescription: + type: object + required: ["id"] + properties: + id: + type: string + description: Id that should be used to query data on API level + type: + type: string + enum: ["range", "instant"] + defaultPeriod: + type: string + description: ISO_8601 duration string (for "range" graphs only) + parameters: + type: array + items: + $ref: '#/components/schemas/GraphParameter' + + GraphParameter: + type: object + required: ["name"] + properties: + name: + type: string + + PrometheusApiBaseResponse: + type: object + required: [ status ] + properties: + status: + type: string + enum: [ "success", "error" ] + error: + type: string + errorType: + type: 
+
+    PrometheusApiBaseResponse:
+      type: object
+      required: [ status ]
+      properties:
+        status:
+          type: string
+          enum: [ "success", "error" ]
+        error:
+          type: string
+        errorType:
+          type: string
+        warnings:
+          type: array
+          items:
+            type: string
+
+    PrometheusApiQueryResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/PrometheusApiBaseResponse"
+      properties:
+        data:
+          $ref: '#/components/schemas/PrometheusApiQueryResponseData'
+
+    PrometheusApiQueryResponseData:
+      type: object
+      required: [ "resultType" ]
+      properties:
+        resultType:
+          type: string
+          enum: [ "matrix", "vector", "scalar", "string" ]
+        result:
+          type: array
+          items: { }
+          description: |
+            Depending on resultType, the format can vary:
+            "vector":
+            [
+              {
+                "metric": { "<label_name>": "<label_value>", ... },
+                "value": [ <unix_time>, "<sample_value>" ],
+                "histogram": [ <unix_time>, <histogram> ]
+              }, ...
+            ]
+            "matrix":
+            [
+              {
+                "metric": { "<label_name>": "<label_value>", ... },
+                "values": [ [ <unix_time>, "<sample_value>" ], ... ],
+                "histograms": [ [ <unix_time>, <histogram> ], ... ]
+              }, ...
+            ]
+            "scalar":
+            [ <unix_time>, "<scalar_value>" ]
+            "string":
+            [ <unix_time>, "<string_value>" ]
 
     ApplicationConfigValidation:
       type: object
       properties:
@@ -4179,6 +4366,8 @@ components:
           $ref: '#/components/schemas/ApplicationPropertyValidation'
         ksqldb:
           $ref: '#/components/schemas/ApplicationPropertyValidation'
+        prometheusStorage:
+          $ref: '#/components/schemas/ApplicationPropertyValidation'
 
     ApplicationConfig:
       type: object
@@ -4351,6 +4540,8 @@ components:
           type: integer
         internalTopicPrefix:
           type: string
+        defaultMetricsStorage:
+          $ref: '#/components/schemas/ClusterMetricsStoreConfig'
         clusters:
           type: array
           items:
@@ -4439,6 +4630,10 @@ components:
             type: string
           keystorePassword:
             type: string
+          prometheusExpose:
+            type: boolean
+          store:
+            $ref: '#/components/schemas/ClusterMetricsStoreConfig'
           properties:
             type: object
             additionalProperties: true
@@ -4520,3 +4715,21 @@ components:
       type: object
       additionalProperties:
         type: string
+
+    ClusterMetricsStoreConfig:
+      type: object
+      properties:
+        prometheus:
+          type: object
+          properties:
+            url:
+              type: string
+            remoteWrite:
+              type: boolean
+            pushGatewayUrl:
+              type: string
+            pushGatewayUsername:
+              type: string
+            pushGatewayPassword:
+              type: string
+            pushGatewayJobName:
+              type: string
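The `/metrics/{clusterName}` endpoint declared earlier in this spec serves the Prometheus text exposition format, presumably gated by the new `prometheusExpose` flag. Fetching it amounts to the sketch below; the base URL and cluster name are placeholders.

```java
import org.springframework.web.reactive.function.client.WebClient;

public class ExpositionScrapeExample {
  public static void main(String[] args) {
    // Reads one cluster's metrics in Prometheus text exposition format.
    String metrics = WebClient.create("http://localhost:8080")
        .get()
        .uri("/metrics/{clusterName}", "local")
        .retrieve()
        .bodyToMono(String.class)
        .block();
    System.out.println(metrics);
  }
}
```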
diff --git a/contract/src/main/resources/swagger/prometheus-query-api.yaml b/contract/src/main/resources/swagger/prometheus-query-api.yaml
new file mode 100644
index 000000000..9ba26a460
--- /dev/null
+++ b/contract/src/main/resources/swagger/prometheus-query-api.yaml
@@ -0,0 +1,363 @@
+openapi: 3.0.1
+info:
+  title: |
+    Prometheus query HTTP API
+  version: 0.1.0
+  contact: { }
+
+tags:
+  - name: /promclient
+servers:
+  - url: /localhost
+
+
+paths:
+  /api/v1/label/{label_name}/values:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns label values
+      description: "returns a list of label values for a provided label name"
+      operationId: getLabelValues
+      parameters:
+        - name: label_name
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: start
+          in: query
+          description: Start timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: End timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: match[]
+          in: query
+          description: Repeated series selector argument that selects the series from which to read the label values.
+          schema:
+            type: string
+            format: series_selector
+      responses:
+        200:
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/LabelValuesResponse'
+
+  /api/v1/labels:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns label names
+      description: returns a list of label names
+      operationId: getLabelNames
+      parameters:
+        - name: start
+          in: query
+          description: |
+            Start timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: |
+            End timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: match[]
+          in: query
+          description: Repeated series selector argument that selects the series from which to read the label values. Optional.
+          schema:
+            type: string
+            format: series_selector
+      responses:
+        200:
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/LabelNamesResponse'
+
+  /api/v1/metadata:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns metric metadata
+      description: returns metadata about metrics currently scraped from targets
+      operationId: getMetricMetadata
+      parameters:
+        - name: limit
+          in: query
+          description: Maximum number of metrics to return.
+          required: true
+          schema:
+            type: integer
+        - name: metric
+          in: query
+          description: A metric name to filter metadata for. All metric metadata is retrieved if left empty.
+          schema:
+            type: string
+      responses:
+        200:
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/MetadataResponse'
+
+  /api/v1/query:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Evaluates instant query
+      description: |
+        Evaluates an instant query at a single point in time
+      operationId: query
+      parameters:
+        - name: query
+          in: query
+          description: |
+            Prometheus expression query string.
+          required: true
+          schema:
+            type: string
+        - name: time
+          in: query
+          description: |
+            Evaluation timestamp. Optional.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: timeout
+          in: query
+          description: |
+            Evaluation timeout. Optional.
+          schema:
+            type: string
+            format: duration
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/QueryResponse'
+
+
+  /api/v1/query_range:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Evaluates query over range of time.
+      description: Evaluates an expression query over a range of time
+      operationId: queryRange
+      parameters:
+        - name: query
+          in: query
+          description: Prometheus expression query string.
+          required: true
+          schema:
+            type: string
+        - name: start
+          in: query
+          description: Start timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: End timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: step
+          in: query
+          description: |
+            Query resolution step width in ```duration``` format or float number of seconds.
+          schema:
+            type: string
+            format: duration | float
+        - name: timeout
+          in: query
+          description: |
+            Evaluation timeout. Optional. Defaults to and is capped by the value of the ```-query.timeout``` flag.
+          schema:
+            type: string
+            format: duration
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
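Assuming the generator derives an API class from the `PrometheusClient` tag (conventionally `PrometheusClientApi`) and that `asyncNative` yields `Mono`-returning methods, an instant query through the generated client would look roughly like this; the parameter order mirrors the spec above, and the base URL is a placeholder.

```java
import io.kafbat.ui.prometheus.ApiClient;
import io.kafbat.ui.prometheus.api.PrometheusClientApi;
import io.kafbat.ui.prometheus.model.QueryResponse;
import reactor.core.publisher.Mono;

public class InstantQueryExample {
  public static void main(String[] args) {
    // Point the generated invoker at a reachable Prometheus instance.
    ApiClient client = new ApiClient().setBasePath("http://localhost:9090");
    PrometheusClientApi api = new PrometheusClientApi(client);

    // query(query, time, timeout); time and timeout are optional and left null here.
    Mono<QueryResponse> result = api.query(
        "sum(rate(kafka_server_brokertopicmetrics_totalfetchrequests_total[5m]))",
        null, null);
    System.out.println(result.block().getStatus());
  }
}
```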
+
+
+  /api/v1/series:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns time series
+      operationId: getSeries
+      parameters:
+        - name: start
+          in: query
+          description: |
+            Start timestamp. Optional.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: |
+            End timestamp. Optional.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: match[]
+          in: query
+          description: |
+            Repeated series selector argument that selects the series to return.
+            At least one ```match[]``` argument must be provided.
+          required: true
+          schema:
+            type: string
+            format: series_selector
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/SeriesResponse'
+
+components:
+  schemas:
+    BaseResponse:
+      type: object
+      required: [ status ]
+      properties:
+        status:
+          type: string
+          enum: [ "success", "error" ]
+        error:
+          type: string
+        errorType:
+          type: string
+        warnings:
+          type: array
+          items:
+            type: string
+
+    QueryResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          $ref: '#/components/schemas/QueryResponseData'
+
+    QueryResponseData:
+      type: object
+      required: [ "resultType" ]
+      properties:
+        resultType:
+          type: string
+          enum: [ "matrix", "vector", "scalar", "string" ]
+        result:
+          type: array
+          items: { }
+          description: |
+            Depending on resultType, the format can vary:
+            "vector":
+            [
+              {
+                "metric": { "<label_name>": "<label_value>", ... },
+                "value": [ <unix_time>, "<sample_value>" ],
+                "histogram": [ <unix_time>, <histogram> ]
+              }, ...
+            ]
+            "matrix":
+            [
+              {
+                "metric": { "<label_name>": "<label_value>", ... },
+                "values": [ [ <unix_time>, "<sample_value>" ], ... ],
+                "histograms": [ [ <unix_time>, <histogram> ], ... ]
+              }, ...
+            ]
+            "scalar":
+            [ <unix_time>, "<scalar_value>" ]
+            "string":
+            [ <unix_time>, "<string_value>" ]
+
+    SeriesResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: array
+          description: a list of objects that contain the label name/value pairs which identify each series
+          items:
+            type: object
+            properties:
+              __name__:
+                type: string
+              job:
+                type: string
+              instance:
+                type: string
+
+    MetadataResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: object
+          additionalProperties:
+            type: array
+            items:
+              type: object
+              additionalProperties: true
+
+    LabelValuesResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: array
+          description: a list of string label values
+          items:
+            type: string
+
+    LabelNamesResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: array
+          description: a list of string label names
+          items:
+            type: string
diff --git a/documentation/compose/kafbat-ui.yaml b/documentation/compose/kafbat-ui.yaml
index afbe0d26c..919f36547 100644
--- a/documentation/compose/kafbat-ui.yaml
+++ b/documentation/compose/kafbat-ui.yaml
@@ -24,6 +24,9 @@ services:
       KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
       KAFKA_CLUSTERS_1_METRICS_PORT: 9998
       KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
+      KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_URL: "http://prometheus:9090"
+      KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_REMOTEWRITE: 'true'
+      KAFKA_CLUSTERS_0_METRICS_STORE_KAFKA_TOPIC: "kafka_metrics"
       DYNAMIC_CONFIG_ENABLED: 'true'
 
   kafka0:
@@ -135,6 +138,16 @@ services:
       CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
 
+  prometheus:
+    image: prom/prometheus:latest
+    hostname: prometheus
+    container_name: prometheus
+    ports:
+      - 9090:9090
+    volumes:
+      - ./scripts:/etc/prometheus
+    command: --web.enable-remote-write-receiver --config.file=/etc/prometheus/prometheus.yaml
+
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.8.0
     volumes:
diff --git a/documentation/compose/scripts/prometheus.yaml b/documentation/compose/scripts/prometheus.yaml
new file mode 100644
index 000000000..457de126c
--- /dev/null
+++ b/documentation/compose/scripts/prometheus.yaml
@@ -0,0 +1,14 @@
+global:
+  scrape_interval: 30s
+  scrape_timeout: 10s
+
+rule_files:
+  - alert.yml
+
+scrape_configs:
+  - job_name: services
+    metrics_path: /metrics
+    static_configs:
+      - targets:
+          - 'prometheus:9090'
+#          - 'kafka-ui:8080'
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 77386fce2..c020b6f2d 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -6,6 +6,7 @@ aws-msk-auth = '2.3.0'
 azure-identity = '1.15.4'
 apache-commons-lang3 = '3.18.0'
+apache-commons-text = '1.13.1'
 apache-commons-io = '2.18.0'
 apache-commons-pool2 = '2.12.1'
 apache-datasketches = '3.1.0'
@@ -14,7 +15,7 @@ apache-commons-compress = '1.26.0'
 assertj = '3.25.3'
 avro = '1.11.4'
 byte-buddy = '1.14.19'
-confluent = '7.9.0'
+confluent = '7.9.2'
 confluent-ccs = '7.9.0-ccs'
 
 mapstruct = '1.6.2'
@@ -31,7 +32,7 @@ testcontainers = '1.20.6'
 swagger-integration-jakarta = '2.2.28'
 jakarta-annotation-api = '2.1.1'
 jackson-databind-nullable = '0.2.6'
-antlr = '4.12.0'
+antlr = '4.13.2'
 json-schema-validator = '2.2.14'
 
 checkstyle = '10.24.0'
@@ -40,6 +41,7 @@ selenide = '7.2.3'
 testng = '7.10.0'
 bonigarcia-webdrivermanager = '6.1.1'
 aspectj = '1.9.21'
+prometheus = '1.3.6'
 
 [plugins]
 spring-boot = { id = 'org.springframework.boot', version.ref = 'spring-boot' }
@@ -82,6 +84,7 @@ jackson-databind-nullable = { module = 'org.openapitools:jackson-databind-nullable'
 kafka-clients = { module = 'org.apache.kafka:kafka-clients', version.ref = 'confluent-ccs' }
 apache-commons = { module = 'org.apache.commons:commons-lang3', version.ref = 'apache-commons-lang3' }
+apache-commons-text = { module = 'org.apache.commons:commons-text', version.ref = 'apache-commons-text' }
 apache-commons-compress = { module = 'org.apache.commons:commons-compress', version.ref = 'apache-commons-compress' }
 apache-commons-io = { module = 'commons-io:commons-io', version.ref = 'apache-commons-io' }
 apache-commons-pool2 = { module = 'org.apache.commons:commons-pool2', version.ref = 'apache-commons-pool2' }
@@ -142,5 +145,11 @@ google-oauth-client = { module = 'com.google.oauth-client:google-oauth-client',
 modelcontextprotocol-spring-webflux = {module = 'io.modelcontextprotocol.sdk:mcp-spring-webflux', version = '0.10.0'}
 victools-jsonschema-generator = {module = 'com.github.victools:jsonschema-generator', version = '4.38.0'}
 
+prometheus-metrics-core = {module = 'io.prometheus:prometheus-metrics-core', version.ref = 'prometheus'}
+prometheus-metrics-textformats = { module = 'io.prometheus:prometheus-metrics-exposition-textformats', version.ref = 'prometheus'}
+prometheus-metrics-exporter-pushgateway = { module = 'io.prometheus:prometheus-metrics-exporter-pushgateway', version.ref = 'prometheus'}
+
+snappy = {module = 'org.xerial.snappy:snappy-java', version = '1.1.10.7'}
+
 # CVE fixes
 reactor-netty-http = {module = 'io.projectreactor.netty:reactor-netty-http', version = '1.2.8'}
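The new `prometheus-metrics-exporter-pushgateway` dependency backs the `pushGatewayUrl`/`pushGatewayJobName` settings defined earlier in the API spec. Pushing a registry with it looks roughly like this; the address, job name, and the metric itself are placeholders, not values from this PR.

```java
import io.prometheus.metrics.core.metrics.Counter;
import io.prometheus.metrics.exporter.pushgateway.PushGateway;
import io.prometheus.metrics.model.registry.PrometheusRegistry;

public class PushGatewayExample {
  public static void main(String[] args) throws Exception {
    PrometheusRegistry registry = new PrometheusRegistry();
    Counter scrapes = Counter.builder()
        .name("kafbat_ui_example_scrapes_total")
        .help("Example counter pushed to a Pushgateway")
        .register(registry);
    scrapes.inc();

    PushGateway pushGateway = PushGateway.builder()
        .address("localhost:9091")   // corresponds to the pushGatewayUrl setting
        .job("kafbat-ui-metrics")    // corresponds to pushGatewayJobName
        .registry(registry)
        .build();
    pushGateway.push();             // replaces all metrics for this job on the gateway
  }
}
```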
diff --git a/serde-api/src/main/java/io/kafbat/ui/serde/api/Serde.java b/serde-api/src/main/java/io/kafbat/ui/serde/api/Serde.java
index b9f812b62..5dc1057a2 100644
--- a/serde-api/src/main/java/io/kafbat/ui/serde/api/Serde.java
+++ b/serde-api/src/main/java/io/kafbat/ui/serde/api/Serde.java
@@ -123,6 +123,12 @@ interface Serializer {
      */
     byte[] serialize(String input);
 
+    /**
+     * Serializes input string to bytes. Uses provided headers for additional information.
+     * @param input string entered by user into UI text field. Note: this input is not formatted in any way.
+     * @param headers headers of the message whose value is being serialized.
+     * @return serialized bytes. Can be null if input is null or empty string.
+     */
     default byte[] serialize(String input, Headers headers) {
       return serialize(input);
     }
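For serde plugin authors, adopting the new header-aware variant amounts to overriding it alongside the required single-argument method; a minimal sketch (the header handling shown is illustrative only):

```java
import io.kafbat.ui.serde.api.Headers;
import io.kafbat.ui.serde.api.Serde;
import java.nio.charset.StandardCharsets;

public class PassthroughSerializer implements Serde.Serializer {

  @Override
  public byte[] serialize(String input) {
    // Mirrors the documented contract: null or empty input yields null.
    return (input == null || input.isEmpty())
        ? null
        : input.getBytes(StandardCharsets.UTF_8);
  }

  @Override
  public byte[] serialize(String input, Headers headers) {
    // Headers are available here for content negotiation or routing;
    // this sketch ignores them and reuses the single-argument overload.
    return serialize(input);
  }
}
```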