diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go
index 6c507179fb2..5f76a2b8961 100644
--- a/ee/query-service/app/api/api.go
+++ b/ee/query-service/app/api/api.go
@@ -203,3 +203,21 @@ func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
 	ah.WriteJSON(w, r, versionResponse)
 }
+
+func (ah *APIHandler) MetricExplorerRoutes(router *mux.Router, am *baseapp.AuthMiddleware) {
+	router.HandleFunc("/api/v1/metrics",
+		am.ViewAccess(ah.ListMetrics)).
+		Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/metrics/treemap",
+		am.ViewAccess(ah.GetTreeMap)).
+		Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/metrics/{metric_name}",
+		am.ViewAccess(ah.GetMetricsDetails)).
+		Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/metrics/filters/keys",
+		am.ViewAccess(ah.FilterKeysSuggestion)).
+		Methods(http.MethodGet)
+	router.HandleFunc("/api/v1/metrics/filters/values",
+		am.ViewAccess(ah.FilterValuesSuggestion)).
+		Methods(http.MethodPost)
+}
diff --git a/ee/query-service/app/api/summary.go b/ee/query-service/app/api/summary.go
new file mode 100644
index 00000000000..0d836f98957
--- /dev/null
+++ b/ee/query-service/app/api/summary.go
@@ -0,0 +1,103 @@
+package api
+
+import (
+	"bytes"
+	"io"
+	"net/http"
+
+	"github.com/gorilla/mux"
+	explorer "go.signoz.io/signoz/pkg/query-service/app/metricsexplorer"
+	"go.uber.org/zap"
+)
+
+func (aH *APIHandler) FilterKeysSuggestion(w http.ResponseWriter, r *http.Request) {
+	bodyBytes, _ := io.ReadAll(r.Body)
+	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+	ctx := r.Context()
+	params, apiError := explorer.ParseFilterKeySuggestions(r)
+	if apiError != nil {
+		zap.L().Error("error parsing summary filter keys request", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	keys, apiError := aH.APIHandler.SummaryService.FilterKeys(ctx, params)
+	if apiError != nil {
+		zap.L().Error("error getting filter keys", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	aH.Respond(w, keys)
+}
+
+func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Request) {
+	bodyBytes, _ := io.ReadAll(r.Body)
+	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+	ctx := r.Context()
+	params, apiError := explorer.ParseFilterValueSuggestions(r)
+	if apiError != nil {
+		zap.L().Error("error parsing summary filter values request", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+
+	values, apiError := aH.APIHandler.SummaryService.FilterValues(ctx, params)
+	if apiError != nil {
+		zap.L().Error("error getting filter values", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	aH.Respond(w, values)
+}
+
+func (aH *APIHandler) ListMetrics(w http.ResponseWriter, r *http.Request) {
+	bodyBytes, _ := io.ReadAll(r.Body)
+	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+	ctx := r.Context()
+	params, apiError := explorer.ParseSummaryListMetricsParams(r)
+	if apiError != nil {
+		zap.L().Error("error parsing list metrics summary request", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+
+	slmr, apiError := aH.APIHandler.SummaryService.ListMetricsWithSummary(ctx, params)
+	if apiError != nil {
+		zap.L().Error("error listing metrics with summary", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	aH.Respond(w, slmr)
+}
+
+func (aH *APIHandler) GetMetricsDetails(w http.ResponseWriter, r *http.Request) {
+	metricName := mux.Vars(r)["metric_name"]
+	ctx := r.Context()
+	metricsDetail, apiError := aH.APIHandler.SummaryService.GetMetricsSummary(ctx, metricName)
+	if apiError != nil {
+		zap.L().Error("error getting metrics summary", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	aH.Respond(w, metricsDetail)
+}
+
+func (aH *APIHandler) GetTreeMap(w http.ResponseWriter, r *http.Request) {
+	bodyBytes, _ := io.ReadAll(r.Body)
+	r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+	ctx := r.Context()
+	params, apiError := explorer.ParseTreeMapMetricsParams(r)
+	if apiError != nil {
+		zap.L().Error("error parsing metrics treemap request", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	result, apiError := aH.APIHandler.SummaryService.GetMetricsTreemap(ctx, params)
+	if apiError != nil {
+		zap.L().Error("error getting metrics treemap data", zap.Error(apiError.Err))
+		RespondError(w, apiError, nil)
+		return
+	}
+	aH.Respond(w, result)
+}
diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go
index 57212cd33dd..0b981e7a613 100644
--- a/ee/query-service/app/server.go
+++ b/ee/query-service/app/server.go
@@ -374,6 +374,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
 	apiHandler.RegisterQueryRangeV4Routes(r, am)
 	apiHandler.RegisterWebSocketPaths(r, am)
 	apiHandler.RegisterMessagingQueuesRoutes(r, am)
+	apiHandler.MetricExplorerRoutes(r, am)
 
 	c := cors.New(cors.Options{
 		AllowedOrigins: []string{"*"},
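For reference, a minimal client-side sketch of calling the new list endpoint. The host, port, and bearer token are assumptions; the payload mirrors the `metrics_explorer.SummaryListMetricsRequest` JSON tags defined later in this PR:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Body shape mirrors metrics_explorer.SummaryListMetricsRequest.
	payload := []byte(`{
		"offset": 0,
		"limit": 10,
		"orderBy": [{"columnName": "dataPoints", "order": "desc"}],
		"startDate": "2025-01-01T00:00:00Z",
		"endDate": "2025-01-02T00:00:00Z",
		"filters": {"items": []}
	}`)

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/api/v1/metrics", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <access-token>") // ViewAccess expects an authenticated user

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```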
diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go
index a923b826cb0..c9fc24a0b78 100644
--- a/pkg/query-service/app/clickhouseReader/reader.go
+++ b/pkg/query-service/app/clickhouseReader/reader.go
@@ -16,6 +16,8 @@ import (
 	"sync"
 	"time"
 
+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/google/uuid"
@@ -1141,7 +1143,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetUsageParams) (*[]model.UsageItem, error) {
 
 func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams,
 	smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
-		levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
+	levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
 	searchSpansResult := []model.SearchSpansResult{
 		{
 			Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
@@ -1289,7 +1291,7 @@
 
 func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams,
 	smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
-		levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
+	levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
 
 	if r.useTraceNewSchema {
 		return r.SearchTracesV2(ctx, params, smartTraceAlgorithm)
@@ -5594,3 +5596,419 @@ func (r *ClickHouseReader) SubscribeToQueryProgress(
 ) (<-chan model.QueryProgress, func(), *model.ApiError) {
 	return r.queryProgressTracker.SubscribeToQueryProgress(queryId)
 }
+
+func (r *ClickHouseReader) GetMetricsCardinalityPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.CardinalityTreemap, *model.ApiError) {
+	var args []interface{}
+
+	// Convert the request's time range to Unix millisecond timestamps.
+	startUnix, err := time.Parse("2006-01-02T15:04:05Z", req.StartDate)
+	if err != nil {
+		return nil, model.BadRequest(fmt.Errorf("invalid startDate: %v", err))
+	}
+	endUnix, err := time.Parse("2006-01-02T15:04:05Z", req.EndDate)
+	if err != nil {
+		return nil, model.BadRequest(fmt.Errorf("invalid endDate: %v", err))
+	}
+
+	// Build filter conditions dynamically from the request filters.
+	conditions, err := utils.BuildFilterConditions(&req.Filters, "")
+	if err != nil {
+		return nil, model.BadRequest(err)
+	}
+	whereClause := ""
+	if len(conditions) > 0 {
+		whereClause = "AND " + strings.Join(conditions, " AND ")
+	}
+
+	// Each metric's distinct-series count is compared against the total
+	// distinct-series count over the same time window.
+	query := fmt.Sprintf(`
+		SELECT
+			metric_name,
+			total_value,
+			(total_value * 100.0 / total_cardinality) AS relative_percentage
+		FROM (
+			SELECT
+				metric_name,
+				uniqExact(fingerprint) AS total_value,
+				(SELECT uniqExact(fingerprint)
+				 FROM %s.%s
+				 WHERE unix_milli BETWEEN ? AND ?) AS total_cardinality
+			FROM %s.%s
+			WHERE unix_milli BETWEEN ? AND ? %s
+			GROUP BY metric_name
+		)
+		ORDER BY relative_percentage DESC
+		LIMIT %d OFFSET %d;`,
+		signozMetricDBName,
+		signozTSTableNameV4,
+		signozMetricDBName,
+		signozTSTableNameV4,
+		whereClause,
+		req.Limit,
+		req.Offset,
+	)
+
+	args = append(args,
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the total_cardinality subquery
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the main query
+	)
+
+	rows, err := r.db.Query(ctx, query, args...)
+	if err != nil {
+		zap.L().Error("Error executing cardinality query", zap.Error(err), zap.String("query", query))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var treemap []metrics_explorer.CardinalityTreemap
+	for rows.Next() {
+		var item metrics_explorer.CardinalityTreemap
+		if err := rows.Scan(&item.MetricName, &item.TotalValue, &item.RelativePercentage); err != nil {
+			zap.L().Error("Error scanning row", zap.Error(err))
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		treemap = append(treemap, item)
+	}
+
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over rows", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &treemap, nil
+}
+
+func (r *ClickHouseReader) GetMetricsDataPointsPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.DataPointTreemap, *model.ApiError) {
+	var args []interface{}
+
+	startUnix, err := time.Parse("2006-01-02T15:04:05Z", req.StartDate)
+	if err != nil {
+		return nil, model.BadRequest(fmt.Errorf("invalid startDate: %v", err))
+	}
+	endUnix, err := time.Parse("2006-01-02T15:04:05Z", req.EndDate)
+	if err != nil {
+		return nil, model.BadRequest(fmt.Errorf("invalid endDate: %v", err))
+	}
+
+	conditions, err := utils.BuildFilterConditions(&req.Filters, "")
+	if err != nil {
+		return nil, model.BadRequest(err)
+	}
+	whereClause := ""
+	if len(conditions) > 0 {
+		whereClause = "AND " + strings.Join(conditions, " AND ")
+	}
+	query := fmt.Sprintf(`
+		SELECT
+			s.metric_name,
+			COALESCE(s.data_points, 0) AS total_value,
+			COALESCE((s.data_points * 100.0 / total.total_data_points), 0) AS relative_percentage
+		FROM (
+			SELECT
+				metric_name,
+				COUNT(*) AS data_points
+			FROM %s.%s
+			WHERE unix_milli BETWEEN ? AND ?
+			AND metric_name IN (
+				SELECT DISTINCT metric_name
+				FROM %s.%s
+				WHERE unix_milli BETWEEN ? AND ? %s
+			)
+			GROUP BY metric_name
+		) AS s
+		JOIN (
+			-- Total data points across all metrics in the window (unfiltered)
+			SELECT COUNT(*) AS total_data_points
+			FROM %s.%s
+			WHERE unix_milli BETWEEN ? AND ?
+		) AS total ON 1 = 1 -- dummy condition for the cross join
+		ORDER BY relative_percentage DESC LIMIT %d OFFSET %d;`,
+		signozMetricDBName,
+		signozSampleTableName,
+		signozMetricDBName,
+		signozTSTableNameV4,
+		whereClause,
+		signozMetricDBName,
+		signozSampleTableName,
+		req.Limit,
+		req.Offset,
+	)
+
+	args = append(args,
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the samples aggregation subquery
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the filtered metric_name subquery
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the unfiltered total subquery
+	)
+
+	rows, err := r.db.Query(ctx, query, args...)
+	if err != nil {
+		zap.L().Error("Error executing data points query", zap.Error(err), zap.String("query", query))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var treemap []metrics_explorer.DataPointTreemap
+	for rows.Next() {
+		var item metrics_explorer.DataPointTreemap
+		if err := rows.Scan(&item.MetricName, &item.TotalValue, &item.RelativePercentage); err != nil {
+			zap.L().Error("Error scanning row", zap.Error(err))
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		treemap = append(treemap, item)
+	}
+
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over rows", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &treemap, nil
+}
+
+func (r *ClickHouseReader) GetMetricsDataPointsAndLastReceived(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError) {
+	query := fmt.Sprintf("SELECT COUNT(*) AS data_points, MAX(unix_milli) AS last_received_time FROM %s.%s WHERE metric_name = ?", signozMetricDBName, signozSampleTableName)
+	var lastReceivedTimestamp int64 // MAX(unix_milli) scans as a signed integer
+	var dataPoints uint64
+	err := r.db.QueryRow(ctx, query, metricName).Scan(&dataPoints, &lastReceivedTimestamp)
+	if err != nil {
+		return 0, 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	return dataPoints, uint64(lastReceivedTimestamp), nil
+}
+
+func (r *ClickHouseReader) GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, uint64, uint64, *model.ApiError) {
+	// Parameterized to avoid interpolating the metric name into the SQL string.
+	query := fmt.Sprintf("SELECT count(*), uniq(labels), count(distinct fingerprint) AS cardinality FROM %s.%s WHERE metric_name = ?", signozMetricDBName, signozTSTableNameV4)
+	var timeSeriesCount uint64
+	var uniqueLabelSets uint64
+	var cardinality uint64
+	err := r.db.QueryRow(ctx, query, metricName).Scan(&timeSeriesCount, &uniqueLabelSets, &cardinality)
+	if err != nil {
+		return 0, 0, 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	return timeSeriesCount, uniqueLabelSets, cardinality, nil
+}
+
+func (r *ClickHouseReader) GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError) {
+	milli := time.Now().Add(-duration).UnixMilli()
+	query := fmt.Sprintf("SELECT count(DISTINCT fingerprint) FROM %s.%s WHERE metric_name = ? AND unix_milli >= ?", signozMetricDBName, signozSampleTableName)
+	var timeSeries uint64
+	// QueryRow is used since the query returns a single value
+	err := r.db.QueryRow(ctx, query, metricName, milli).Scan(&timeSeries)
+	if err != nil {
+		return 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	return timeSeries, nil
+}
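A small aside on the timestamp handling above: the reader expects `startDate`/`endDate` in the fixed layout `"2006-01-02T15:04:05Z"` and converts them to Unix milliseconds for the `unix_milli BETWEEN ? AND ?` predicates. A minimal sketch of that conversion:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The reference layout must match the request format exactly,
	// e.g. "2025-01-01T00:00:00Z" (UTC, no offset).
	const layout = "2006-01-02T15:04:05Z"
	start, err := time.Parse(layout, "2025-01-01T00:00:00Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(start.UnixMilli()) // 1735689600000
}
```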
+func (r *ClickHouseReader) GetAttributesForMetricName(ctx context.Context, metricName string) (*[]metrics_explorer.Attribute, *model.ApiError) {
+	// contribution weights keys with fewer distinct values higher: 1 / log2(1 + N).
+	query := fmt.Sprintf(`
+		SELECT
+			kv.1 AS key,
+			groupUniqArray(kv.2) AS value,
+			1 / log2(1 + length(groupUniqArray(kv.2))) AS contribution
+		FROM %s.%s
+		ARRAY JOIN arrayZip(
+			JSONExtractKeys(labels),
+			arrayMap(k -> JSONExtractString(labels, k), JSONExtractKeys(labels))
+		) AS kv
+		WHERE
+			metric_name = ?
+			AND NOT startsWith(kv.1, '__')
+		GROUP BY kv.1
+		ORDER BY contribution DESC;
+	`, signozMetricDBName, signozTSLocalTableNameV4)
+
+	rows, err := r.db.Query(ctx, query, metricName)
+	if err != nil {
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var attributesList []metrics_explorer.Attribute
+	for rows.Next() {
+		var key string
+		var value []string
+		var contribution float64
+
+		// Scan each column into its corresponding variable
+		if err := rows.Scan(&key, &value, &contribution); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+
+		attributesList = append(attributesList, metrics_explorer.Attribute{
+			Key:          key,
+			Value:        value,
+			Contribution: contribution,
+		})
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &attributesList, nil
+}
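The `contribution` score in the query above is the SQL expression `1 / log2(1 + N)`, where N is the number of distinct values for a label key, so low-cardinality keys sort first. A standalone Go illustration of the same formula:

```go
package main

import (
	"fmt"
	"math"
)

// contribution mirrors the SQL expression 1 / log2(1 + N): keys with fewer
// distinct values score higher and therefore sort first.
func contribution(distinctValues int) float64 {
	return 1 / math.Log2(1+float64(distinctValues))
}

func main() {
	fmt.Printf("%.3f\n", contribution(1))  // 1.000
	fmt.Printf("%.3f\n", contribution(3))  // 0.500
	fmt.Printf("%.3f\n", contribution(15)) // 0.250
}
```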
+func (r *ClickHouseReader) GetAllMetricFilterAttributeKeys(ctx context.Context, req *metrics_explorer.FilterKeyRequest, skipDotNames bool) (*[]v3.AttributeKey, *model.ApiError) {
+	var response []v3.AttributeKey
+	query := fmt.Sprintf("SELECT arrayJoin(tagKeys) AS distinctTagKey FROM (SELECT JSONExtractKeys(labels) AS tagKeys FROM %s.%s WHERE unix_milli >= $1 GROUP BY tagKeys) WHERE distinctTagKey ILIKE $2 AND distinctTagKey NOT LIKE '\\_\\_%%' GROUP BY distinctTagKey", signozMetricDBName, signozTSTableNameV41Day)
+	if req.Limit != 0 {
+		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
+	}
+	rows, err := r.db.Query(ctx, query, common.PastDayRoundOff(), fmt.Sprintf("%%%s%%", req.SearchText))
+	if err != nil {
+		zap.L().Error("Error while executing query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var attributeKey string
+	for rows.Next() {
+		if err := rows.Scan(&attributeKey); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		if skipDotNames && strings.Contains(attributeKey, ".") {
+			continue
+		}
+		key := v3.AttributeKey{
+			Key:      attributeKey,
+			DataType: v3.AttributeKeyDataTypeString, // https://github.com/OpenObservability/OpenMetrics/blob/main/proto/openmetrics_data_model.proto#L64-L72
+			Type:     v3.AttributeKeyTypeTag,
+			IsColumn: false,
+		}
+		response = append(response, key)
+	}
+	return &response, nil
+}
+
+func (r *ClickHouseReader) GetAllMetricFilterAttributeValues(ctx context.Context, req *metrics_explorer.FilterValueRequest) (*[]v3.AttributeKey, *model.ApiError) {
+	var attributeValues []v3.AttributeKey
+
+	query := fmt.Sprintf("SELECT JSONExtractString(labels, $1) AS tagValue FROM %s.%s WHERE JSONExtractString(labels, $2) ILIKE $3 AND unix_milli >= $4 GROUP BY tagValue", signozMetricDBName, signozTSTableNameV41Day)
+	if req.Limit != 0 {
+		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
+	}
+	rows, err := r.db.Query(ctx, query, req.FilterAttributeKey, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff())
+	if err != nil {
+		zap.L().Error("Error while executing query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var attributeValue string
+	for rows.Next() {
+		if err := rows.Scan(&attributeValue); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		key := v3.AttributeKey{
+			Key:      attributeValue,
+			DataType: v3.AttributeKeyDataTypeString,
+			Type:     v3.AttributeKeyTypeTag,
+			IsColumn: false,
+		}
+		attributeValues = append(attributeValues, key)
+	}
+
+	return &attributeValues, nil
+}
+
+func (r *ClickHouseReader) GetAllMetricFilterUnits(ctx context.Context, req *metrics_explorer.FilterValueRequest) (*[]v3.AttributeKey, *model.ApiError) {
+	var response []v3.AttributeKey
+	query := fmt.Sprintf("SELECT DISTINCT unit FROM %s.%s WHERE unit ILIKE $1 AND unit IS NOT NULL ORDER BY unit", signozMetricDBName, signozTSTableNameV41Day)
+	if req.Limit != 0 {
+		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
+	}
+	rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
+	if err != nil {
+		zap.L().Error("Error while executing query", zap.Error(err))
+		return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var attributeKey string
+	for rows.Next() {
+		if err := rows.Scan(&attributeKey); err != nil {
+			return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		key := v3.AttributeKey{
+			Key:      attributeKey,
+			DataType: v3.AttributeKeyDataTypeString,
+			IsColumn: true,
+		}
+		response = append(response, key)
+	}
+	return &response, nil
+}
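For context, a sketch of the request body the values endpoint consumes. The struct below is a local mirror of `metrics_explorer.FilterValueRequest` for illustration only; `filterTypeKey` selects which dimension ("metric_name", "attributes", "unit") to fetch suggestions for:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of metrics_explorer.FilterValueRequest.
type filterValueRequest struct {
	FilterTypeKey      string `json:"filterTypeKey"`
	FilterAttributeKey string `json:"filterAttributeKey"`
	SearchText         string `json:"searchText"`
	Limit              int    `json:"limit"`
}

func main() {
	body, _ := json.Marshal(filterValueRequest{
		FilterTypeKey:      "attributes",
		FilterAttributeKey: "service_name", // label key whose values we want
		SearchText:         "front",
		Limit:              10,
	})
	fmt.Println(string(body))
}
```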
+func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
+	var args []interface{}
+
+	// Convert the request's time range to Unix millisecond timestamps.
+	startUnix, err := time.Parse("2006-01-02T15:04:05Z", req.StartDate)
+	if err != nil {
+		return nil, model.BadRequest(fmt.Errorf("invalid startDate: %v", err))
+	}
+	endUnix, err := time.Parse("2006-01-02T15:04:05Z", req.EndDate)
+	if err != nil {
+		return nil, model.BadRequest(fmt.Errorf("invalid endDate: %v", err))
+	}
+
+	// Build filter conditions dynamically
+	conditions, err := utils.BuildFilterConditions(&req.Filters, "t")
+	if err != nil {
+		return nil, model.BadRequest(err)
+	}
+
+	// Build ordering dynamically
+	orderByClause := ""
+	if len(req.OrderBy) > 0 {
+		orderParts := []string{}
+		for _, order := range req.OrderBy {
+			orderParts = append(orderParts, fmt.Sprintf("%s %s", order.ColumnName, order.Order))
+		}
+		orderByClause = "ORDER BY " + strings.Join(orderParts, ", ")
+	}
+
+	// Prefix with AND only when filters exist, so an empty filter set
+	// does not leave a dangling AND in the WHERE clause.
+	whereClause := ""
+	if len(conditions) > 0 {
+		whereClause = "AND " + strings.Join(conditions, " AND ")
+	}
+
+	query := fmt.Sprintf(`
+		SELECT
+			t.metric_name AS metric_name,
+			argMax(t.description, t.fingerprint) AS description,
+			argMax(t.type, t.fingerprint) AS type,
+			t.unit,
+			COUNT(DISTINCT t.fingerprint) AS cardinality,
+			COALESCE(MAX(s.data_points), 0) AS dataPoints,
+			MAX(s.last_received_time) AS lastReceived,
+			COUNT(DISTINCT t.metric_name) OVER () AS total
+		FROM %s.%s AS t
+		LEFT JOIN (
+			SELECT
+				metric_name,
+				COUNT(*) AS data_points,
+				MAX(unix_milli) AS last_received_time
+			FROM %s.%s
+			WHERE unix_milli BETWEEN ? AND ?
+			GROUP BY metric_name
+		) AS s USING (metric_name)
+		WHERE t.unix_milli BETWEEN ? AND ? %s
+		GROUP BY t.metric_name, t.unit
+		%s
+		LIMIT %d OFFSET %d;`,
+		signozMetricDBName, signozTSTableNameV4,
+		signozMetricDBName, signozSampleTableName,
+		whereClause, orderByClause, req.Limit, req.Offset)
+
+	args = append(args,
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the samples subquery
+		startUnix.UnixMilli(), endUnix.UnixMilli(), // For the main query
+	)
+
+	rows, err := r.db.Query(ctx, query, args...)
+	if err != nil {
+		zap.L().Error("Error executing metrics summary query", zap.Error(err))
+		return &metrics_explorer.SummaryListMetricsResponse{}, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+	defer rows.Close()
+
+	var response metrics_explorer.SummaryListMetricsResponse
+	for rows.Next() {
+		var metric metrics_explorer.MetricDetail
+		if err := rows.Scan(&metric.MetricName, &metric.Description, &metric.Type, &metric.Unit, &metric.Cardinality, &metric.DataPoints, &metric.LastReceived, &response.Total); err != nil {
+			zap.L().Error("Error scanning metric row", zap.Error(err))
+			return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+		}
+		response.Metrics = append(response.Metrics, metric)
+	}
+
+	if err := rows.Err(); err != nil {
+		zap.L().Error("Error iterating over metric rows", zap.Error(err))
+		return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
+	}
+
+	return &response, nil
+}
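One caveat worth noting: the `ORDER BY` clause above interpolates caller-supplied column names directly into the SQL string. A defensive sketch (the `sanitizeOrder` helper is hypothetical, not part of this PR) of whitelisting the sortable columns:

```go
package main

import "fmt"

// Columns the summary list can legitimately sort on (assumed set).
var allowedOrderColumns = map[string]bool{
	"metric_name": true, "cardinality": true, "dataPoints": true, "lastReceived": true,
}

// sanitizeOrder rejects anything outside the whitelist before the value
// is interpolated into an ORDER BY clause.
func sanitizeOrder(column, direction string) (string, error) {
	if !allowedOrderColumns[column] {
		return "", fmt.Errorf("unsupported order column: %q", column)
	}
	if direction != "asc" && direction != "desc" {
		return "", fmt.Errorf("unsupported order direction: %q", direction)
	}
	return column + " " + direction, nil
}

func main() {
	clause, err := sanitizeOrder("dataPoints", "desc")
	fmt.Println(clause, err) // dataPoints desc <nil>
}
```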
diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go
index 90c9943708e..d25c43fe152 100644
--- a/pkg/query-service/app/dashboards/model.go
+++ b/pkg/query-service/app/dashboards/model.go
@@ -537,3 +537,87 @@ func countPanelsInDashboard(inputData map[string]interface{}) model.DashboardsInfo {
 		LogsPanelsWithAttrContainsOp: logsPanelsWithAttrContains,
 	}
 }
+
+func GetDashboardsWithMetricName(ctx context.Context, metricName string) ([]map[string]string, *model.ApiError) {
+	// Fetch all dashboards first, then walk their widget JSON in Go
+	query := `SELECT uuid, data FROM dashboards`
+
+	type dashboardRow struct {
+		Uuid string          `db:"uuid"`
+		Data json.RawMessage `db:"data"`
+	}
+
+	var dashboards []dashboardRow
+	err := db.Select(&dashboards, query)
+	if err != nil {
+		zap.L().Error("Error in getting dashboards", zap.Error(err))
+		return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
+	}
+
+	var result []map[string]string
+	for _, dashboard := range dashboards {
+		var dashData map[string]interface{}
+		if err := json.Unmarshal(dashboard.Data, &dashData); err != nil {
+			continue
+		}
+
+		dashTitle, _ := dashData["title"].(string)
+		widgets, ok := dashData["widgets"].([]interface{})
+		if !ok {
+			continue
+		}
+
+		for _, w := range widgets {
+			widget, ok := w.(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			widgetTitle, _ := widget["title"].(string)
+			widgetID, _ := widget["id"].(string)
+
+			widgetQuery, ok := widget["query"].(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			builder, ok := widgetQuery["builder"].(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			queryData, ok := builder["queryData"].([]interface{})
+			if !ok {
+				continue
+			}
+
+			for _, qd := range queryData {
+				data, ok := qd.(map[string]interface{})
+				if !ok {
+					continue
+				}
+
+				if dataSource, ok := data["dataSource"].(string); !ok || dataSource != "metrics" {
+					continue
+				}
+
+				aggregateAttr, ok := data["aggregateAttribute"].(map[string]interface{})
+				if !ok {
+					continue
+				}
+
+				// Map keys must match the JSON tags on metrics_explorer.Dashboard so the
+				// marshal/unmarshal round trip in GetMetricsSummary populates the DTO.
+				if key, ok := aggregateAttr["key"].(string); ok && strings.TrimSpace(key) == metricName {
+					result = append(result, map[string]string{
+						"dashboard_id":   dashboard.Uuid,
+						"widget_name":    widgetTitle,
+						"widget_id":      widgetID,
+						"dashboard_name": dashTitle,
+					})
+				}
+			}
+		}
+	}
+
+	return result, nil
+}
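For orientation, a minimal illustration of the dashboard JSON shape the walker above expects: `widgets[].query.builder.queryData[].aggregateAttribute.key` is matched against the metric name whenever `dataSource` is `"metrics"`. The field values are made up for the example:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := `{
		"title": "My dashboard",
		"widgets": [{
			"id": "w1",
			"title": "p99 latency",
			"query": {"builder": {"queryData": [{
				"dataSource": "metrics",
				"aggregateAttribute": {"key": "signoz_latency_bucket"}
			}]}}
		}]
	}`
	var dash map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &dash); err != nil {
		panic(err)
	}
	fmt.Println(dash["title"]) // My dashboard
}
```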
diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go
index 1b20f067118..6ca4b7cb2d5 100644
--- a/pkg/query-service/app/http_handler.go
+++ b/pkg/query-service/app/http_handler.go
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"go.signoz.io/signoz/pkg/query-service/app/metricsexplorer"
 	"io"
 	"math"
 	"net/http"
@@ -125,6 +126,8 @@ type APIHandler struct {
 	statefulsetsRepo *inframetrics.StatefulSetsRepo
 	jobsRepo         *inframetrics.JobsRepo
 
+	SummaryService *metricsexplorer.SummaryService
+
 	pvcsRepo *inframetrics.PvcsRepo
 }
@@ -209,6 +212,8 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 	statefulsetsRepo := inframetrics.NewStatefulSetsRepo(opts.Reader, querierv2)
 	jobsRepo := inframetrics.NewJobsRepo(opts.Reader, querierv2)
 	pvcsRepo := inframetrics.NewPvcsRepo(opts.Reader, querierv2)
+	//explorerCache := metricsexplorer.NewExplorerCache(metricsexplorer.WithCache(opts.Cache))
+	summaryService := metricsexplorer.NewSummaryService(opts.Reader, querierv2)
 
 	aH := &APIHandler{
 		reader: opts.Reader,
@@ -237,6 +242,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
 		statefulsetsRepo: statefulsetsRepo,
 		jobsRepo:         jobsRepo,
 		pvcsRepo:         pvcsRepo,
+		SummaryService:   summaryService,
 	}
 
 	logsQueryBuilder := logsv3.PrepareLogsQuery
diff --git a/pkg/query-service/app/metricsexplorer/cache.go b/pkg/query-service/app/metricsexplorer/cache.go
new file mode 100644
index 00000000000..3e20b06b523
--- /dev/null
+++ b/pkg/query-service/app/metricsexplorer/cache.go
@@ -0,0 +1,184 @@
+package metricsexplorer
+
+import (
+	"encoding/json"
+	"sort"
+
+	"go.signoz.io/signoz/pkg/query-service/cache"
+	"go.uber.org/zap"
+)
+
+type MetricsExplorerCache struct {
+	cache cache.Cache
+}
+
+type CachedSeriesData struct {
+	Start int64       `json:"start"`
+	End   int64       `json:"end"`
+	Data  interface{} `json:"data"`
+}
+
+type MetricsExplorerCacheOption func(c *MetricsExplorerCache)
+
+type MissInterval struct {
+	Start, End int64 // in milliseconds
+}
+
+func NewExplorerCache(opts ...MetricsExplorerCacheOption) *MetricsExplorerCache {
+	c := &MetricsExplorerCache{}
+	for _, opt := range opts {
+		opt(c)
+	}
+	return c
+}
+
+func WithCache(cache cache.Cache) MetricsExplorerCacheOption {
+	return func(c *MetricsExplorerCache) {
+		c.cache = cache
+	}
+}
+
+func (c *MetricsExplorerCache) FindMissingTimeRanges(start, end int64, cacheKey string) []MissInterval {
+	if c.cache == nil || cacheKey == "" {
+		return []MissInterval{{Start: start, End: end}}
+	}
+
+	cachedSeriesDataList := c.getCachedSeriesData(cacheKey)
+
+	// Sort the cached data by start time
+	sort.Slice(cachedSeriesDataList, func(i, j int) bool {
+		return cachedSeriesDataList[i].Start < cachedSeriesDataList[j].Start
+	})
+
+	zap.L().Info("Number of non-overlapping cached series data", zap.Int("count", len(cachedSeriesDataList)))
+
+	var missingRanges []MissInterval
+	currentTime := start
+
+	for _, data := range cachedSeriesDataList {
+		// Ignore cached data that ends before the start time
+		if data.End <= start {
+			continue
+		}
+		// Stop processing if we've reached the end time
+		if data.Start >= end {
+			break
+		}
+
+		// Add a missing range if there's a gap
+		if currentTime < data.Start {
+			missingRanges = append(missingRanges, MissInterval{Start: currentTime, End: min(data.Start, end)})
+		}
+
+		// Advance currentTime, but don't go past the end time
+		currentTime = max(currentTime, min(data.End, end))
+	}
+
+	// Add the final missing range if necessary
+	if currentTime < end {
+		missingRanges = append(missingRanges, MissInterval{Start: currentTime, End: end})
+	}
+
+	return missingRanges
+}
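The gap-walk in `FindMissingTimeRanges` is easiest to see on concrete numbers. A self-contained reproduction of the same loop (local `interval` type standing in for `MissInterval`/`CachedSeriesData`, all values in milliseconds):

```go
package main

import "fmt"

type interval struct{ start, end int64 }

// findMissing walks sorted cached intervals and emits the uncovered gaps
// inside [start, end), exactly like FindMissingTimeRanges.
func findMissing(start, end int64, cached []interval) []interval {
	var missing []interval
	cur := start
	for _, c := range cached {
		if c.end <= start {
			continue // entirely before the window
		}
		if c.start >= end {
			break // entirely after the window
		}
		if cur < c.start {
			missing = append(missing, interval{cur, min(c.start, end)})
		}
		cur = max(cur, min(c.end, end))
	}
	if cur < end {
		missing = append(missing, interval{cur, end})
	}
	return missing
}

func main() {
	cached := []interval{{100, 200}, {300, 400}}
	fmt.Println(findMissing(0, 500, cached)) // [{0 100} {200 300} {400 500}]
}
```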
+
+func (c *MetricsExplorerCache) getCachedSeriesData(cacheKey string) []*CachedSeriesData {
+	cachedData, _, _ := c.cache.Retrieve(cacheKey, true)
+	var cachedSeriesDataList []*CachedSeriesData
+	if err := json.Unmarshal(cachedData, &cachedSeriesDataList); err != nil {
+		return nil
+	}
+	return cachedSeriesDataList
+}
+
+func (c *MetricsExplorerCache) storeMergedData(cacheKey string, mergedData []CachedSeriesData) {
+	mergedDataJSON, err := json.Marshal(mergedData)
+	if err != nil {
+		zap.L().Error("error marshalling merged data", zap.Error(err))
+		return
+	}
+	err = c.cache.Store(cacheKey, mergedDataJSON, 0)
+	if err != nil {
+		zap.L().Error("error storing merged data", zap.Error(err))
+	}
+}
+
+func (c *MetricsExplorerCache) MergeWithCachedSeriesData(cacheKey string, newData []CachedSeriesData) []CachedSeriesData {
+	if c.cache == nil {
+		return newData
+	}
+
+	cachedData, _, _ := c.cache.Retrieve(cacheKey, true)
+	var existingData []CachedSeriesData
+	if err := json.Unmarshal(cachedData, &existingData); err != nil {
+		// If the cached payload cannot be decoded, overwrite it with the new data
+		c.storeMergedData(cacheKey, newData)
+		return newData
+	}
+
+	allData := append(existingData, newData...)
+
+	sort.Slice(allData, func(i, j int) bool {
+		return allData[i].Start < allData[j].Start
+	})
+
+	var mergedData []CachedSeriesData
+	var current *CachedSeriesData
+
+	for _, data := range allData {
+		if current == nil {
+			current = &CachedSeriesData{
+				Start: data.Start,
+				End:   data.End,
+				Data:  data.Data,
+			}
+			continue
+		}
+		if data.Start <= current.End {
+			// Overlapping intervals, merge them
+			current.End = max(current.End, data.End)
+			current.Start = min(current.Start, data.Start)
+			// Merge the Data fields
+			current.Data = c.mergeSeries(current.Data, data.Data)
+		} else {
+			// No overlap, add current to mergedData
+			mergedData = append(mergedData, *current)
+			// Start a new current interval
+			current = &CachedSeriesData{
+				Start: data.Start,
+				End:   data.End,
+				Data:  data.Data,
+			}
+		}
+	}
+
+	// After the loop, add the last current interval
+	if current != nil {
+		mergedData = append(mergedData, *current)
+	}
+
+	c.storeMergedData(cacheKey, mergedData)
+
+	return mergedData
+}
+
+func (c *MetricsExplorerCache) mergeSeries(existingData interface{}, newData interface{}) interface{} {
+	// Assumes existingData and newData are []float64 slices
+	existingSlice, ok1 := existingData.([]float64)
+	newSlice, ok2 := newData.([]float64)
+
+	if !ok1 || !ok2 {
+		// Data types are not as expected; return the existing data unchanged
+		zap.L().Error("Data types are not as expected for merging")
+		return existingData
+	}
+
+	// Append the new data to the existing data and sort the result
+	merged := append(existingSlice, newSlice...)
+	sort.Float64s(merged)
+
+	return merged
+}
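The overlap-merge in `MergeWithCachedSeriesData` follows the classic sort-then-fold interval merge. A standalone reduction of just that loop (local `span` type, ignoring the `Data` payload):

```go
package main

import (
	"fmt"
	"sort"
)

type span struct{ start, end int64 }

// mergeSpans sorts by start and folds each span into the current run
// whenever it overlaps, as MergeWithCachedSeriesData does.
func mergeSpans(spans []span) []span {
	sort.Slice(spans, func(i, j int) bool { return spans[i].start < spans[j].start })
	var merged []span
	for _, s := range spans {
		if n := len(merged); n > 0 && s.start <= merged[n-1].end {
			merged[n-1].end = max(merged[n-1].end, s.end)
			continue
		}
		merged = append(merged, s)
	}
	return merged
}

func main() {
	fmt.Println(mergeSpans([]span{{300, 400}, {100, 250}, {200, 350}}))
	// [{100 400}]
}
```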
diff --git a/pkg/query-service/app/metricsexplorer/parser.go b/pkg/query-service/app/metricsexplorer/parser.go
new file mode 100644
index 00000000000..5568d8df1a8
--- /dev/null
+++ b/pkg/query-service/app/metricsexplorer/parser.go
@@ -0,0 +1,55 @@
+package metricsexplorer
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"go.signoz.io/signoz/pkg/query-service/model"
+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+)
+
+func ParseSummaryListMetricsParams(r *http.Request) (*metrics_explorer.SummaryListMetricsRequest, *model.ApiError) {
+	var listMetricsParams *metrics_explorer.SummaryListMetricsRequest
+
+	// parse the request body
+	if err := json.NewDecoder(r.Body).Decode(&listMetricsParams); err != nil {
+		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
+	}
+
+	return listMetricsParams, nil
+}
+
+func ParseFilterKeySuggestions(r *http.Request) (*metrics_explorer.FilterKeyRequest, *model.ApiError) {
+	searchText := r.URL.Query().Get("searchText")
+	limit, err := strconv.Atoi(r.URL.Query().Get("limit"))
+	if err != nil {
+		limit = 50
+	}
+
+	return &metrics_explorer.FilterKeyRequest{Limit: limit, SearchText: searchText}, nil
+}
+
+func ParseFilterValueSuggestions(r *http.Request) (*metrics_explorer.FilterValueRequest, *model.ApiError) {
+	var filterValueRequest *metrics_explorer.FilterValueRequest
+
+	// parse the request body
+	if err := json.NewDecoder(r.Body).Decode(&filterValueRequest); err != nil {
+		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
+	}
+
+	return filterValueRequest, nil
+}
+
+func ParseTreeMapMetricsParams(r *http.Request) (*metrics_explorer.TreeMapMetricsRequest, *model.ApiError) {
+	var treeMapMetricParams *metrics_explorer.TreeMapMetricsRequest
+
+	// parse the request body
+	if err := json.NewDecoder(r.Body).Decode(&treeMapMetricParams); err != nil {
+		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
+	}
+
+	return treeMapMetricParams, nil
+}
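Note the fallback in `ParseFilterKeySuggestions`: a missing or non-numeric `limit` silently defaults to 50. A self-contained sketch of that query-string handling:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

// Standalone reproduction of ParseFilterKeySuggestions' query handling:
// limit falls back to 50 when the parameter is absent or invalid.
func parseFilterKeyParams(r *http.Request) (searchText string, limit int) {
	searchText = r.URL.Query().Get("searchText")
	limit, err := strconv.Atoi(r.URL.Query().Get("limit"))
	if err != nil {
		limit = 50
	}
	return searchText, limit
}

func main() {
	u, _ := url.Parse("http://x/api/v1/metrics/filters/keys?searchText=cpu")
	req := &http.Request{URL: u}
	text, limit := parseFilterKeyParams(req)
	fmt.Println(text, limit) // cpu 50
}
```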
diff --git a/pkg/query-service/app/metricsexplorer/summary.go b/pkg/query-service/app/metricsexplorer/summary.go
new file mode 100644
index 00000000000..44f71422c62
--- /dev/null
+++ b/pkg/query-service/app/metricsexplorer/summary.go
@@ -0,0 +1,230 @@
+package metricsexplorer
+
+import (
+	"context"
+	"encoding/json"
+	"sync"
+	"time"
+
+	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
+	"go.signoz.io/signoz/pkg/query-service/interfaces"
+	"go.signoz.io/signoz/pkg/query-service/model"
+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"go.uber.org/zap"
+)
+
+type SummaryService struct {
+	reader    interfaces.Reader
+	querierV2 interfaces.Querier
+	cache     MetricsExplorerCache
+}
+
+func NewSummaryService(reader interfaces.Reader, querierV2 interfaces.Querier) *SummaryService {
+	return &SummaryService{reader: reader, querierV2: querierV2}
+}
+
+func (receiver *SummaryService) FilterKeys(ctx context.Context, params *metrics_explorer.FilterKeyRequest) (*metrics_explorer.FilterKeyResponse, *model.ApiError) {
+	var response metrics_explorer.FilterKeyResponse
+	keys, apiError := receiver.reader.GetAllMetricFilterAttributeKeys(
+		ctx,
+		params,
+		true,
+	)
+	if apiError != nil {
+		return nil, apiError
+	}
+	response.AttributeKeys = *keys
+	var availableColumnFilter []string
+	for key := range metrics_explorer.AvailableColumnFilterMap {
+		availableColumnFilter = append(availableColumnFilter, string(key))
+	}
+	response.MetricColumns = availableColumnFilter
+	return &response, nil
+}
+
+func (receiver *SummaryService) FilterValues(ctx context.Context, params *metrics_explorer.FilterValueRequest) (*metrics_explorer.FilterValueResponse, *model.ApiError) {
+	var response metrics_explorer.FilterValueResponse
+	switch params.FilterTypeKey {
+	case metrics_explorer.FilterKeyMetricName:
+		request := v3.AggregateAttributeRequest{DataSource: v3.DataSourceMetrics, SearchText: params.SearchText, Limit: params.Limit}
+		attributes, err := receiver.reader.GetMetricAggregateAttributes(ctx, &request, true)
+		if err != nil {
+			return nil, model.InternalError(err)
+		}
+		response.FilterValues = attributes.AttributeKeys
+		return &response, nil
+	case metrics_explorer.FilterKeyAttributes:
+		attributes, err := receiver.reader.GetAllMetricFilterAttributeValues(ctx, params)
+		if err != nil {
+			return nil, err
+		}
+		response.FilterValues = *attributes
+		return &response, nil
+	case metrics_explorer.FilterKeyUnit:
+		attributes, err := receiver.reader.GetAllMetricFilterUnits(ctx, params)
+		if err != nil {
+			return nil, err
+		}
+		response.FilterValues = *attributes
+		return &response, nil
+	default:
+		return nil, nil
+	}
+}
+
+func (receiver *SummaryService) ListMetricsWithSummary(ctx context.Context, params *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
+	return receiver.reader.ListSummaryMetrics(ctx, params)
+}
+
+func (receiver *SummaryService) GetMetricsTreemap(ctx context.Context, params *metrics_explorer.TreeMapMetricsRequest) (*metrics_explorer.TreeMap, *model.ApiError) {
+	var response metrics_explorer.TreeMap
+	switch params.Treemap {
+	case metrics_explorer.CardinalityTreeMap:
+		cardinality, apiError := receiver.reader.GetMetricsCardinalityPercentage(ctx, params)
+		if apiError != nil {
+			return nil, apiError
+		}
+		response.Cardinality = *cardinality
+		return &response, nil
+	case metrics_explorer.DataPointsTreeMap:
+		dataPoints, apiError := receiver.reader.GetMetricsDataPointsPercentage(ctx, params)
+		if apiError != nil {
+			return nil, apiError
+		}
+		response.DataPoints = *dataPoints
+		return &response, nil
+	default:
+		return nil, nil
+	}
+}
+
+func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, metricName string) (metrics_explorer.MetricDetailsDTO, *model.ApiError) {
+	var (
+		wg               sync.WaitGroup
+		metricDetailsDTO metrics_explorer.MetricDetailsDTO
+		errCh            = make(chan *model.ApiError, 1)
+	)
+
+	// Create a cancellable context so the other goroutines stop on the first error
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	handleError := func(err *model.ApiError) {
+		select {
+		case errCh <- err: // Send the error if no error was sent yet
+			cancel() // Cancel the other goroutines
+		default:
+		}
+	}
+
+	wg.Add(6)
+
+	// Call 0: GetMetricMetadata
+	go func() {
+		defer wg.Done()
+		metadata, err := receiver.reader.GetMetricMetadata(ctx, metricName, metricName)
+		if err != nil {
+			handleError(&model.ApiError{Typ: "ClickHouseError", Err: err})
+			return
+		}
+		metricDetailsDTO.Name = metricName
+		metricDetailsDTO.Unit = metadata.Unit
+		metricDetailsDTO.Description = metadata.Description
+		metricDetailsDTO.Type = metadata.Type
+		metricDetailsDTO.Metadata.MetricType = metadata.Type
+		metricDetailsDTO.Metadata.Description = metadata.Description
+		metricDetailsDTO.Metadata.Unit = metadata.Unit
+	}()
+
+	// Call 1: GetMetricsDataPointsAndLastReceived
+	go func() {
+		defer wg.Done()
+		dataPoints, lastReceived, err := receiver.reader.GetMetricsDataPointsAndLastReceived(ctx, metricName)
+		if err != nil {
+			handleError(err)
+			return
+		}
+		metricDetailsDTO.DataPoints = dataPoints
+		metricDetailsDTO.LastReceived = lastReceived
+	}()
+
+	// Call 2: GetTotalTimeSeriesForMetricName; LastReceived is owned by Call 1,
+	// so only the series total and cardinality are taken from this call.
+	go func() {
+		defer wg.Done()
+		totalSeries, _, cardinality, err := receiver.reader.GetTotalTimeSeriesForMetricName(ctx, metricName)
+		if err != nil {
+			handleError(err)
+			return
+		}
+		metricDetailsDTO.TimeSeriesTotal = totalSeries
+		metricDetailsDTO.Cardinality = cardinality
+	}()
+
+	// Call 3: GetActiveTimeSeriesForMetricName
+	go func() {
+		defer wg.Done()
+		activeSeries, err := receiver.reader.GetActiveTimeSeriesForMetricName(ctx, metricName, 30*time.Minute)
+		if err != nil {
+			handleError(err)
+			return
+		}
+		metricDetailsDTO.TimeSeriesActive = activeSeries
+	}()
+
+	// Call 4: GetAttributesForMetricName
+	go func() {
+		defer wg.Done()
+		attributes, err := receiver.reader.GetAttributesForMetricName(ctx, metricName)
+		if err != nil {
+			handleError(err)
+			return
+		}
+		if attributes != nil {
+			metricDetailsDTO.Attributes = *attributes
+		}
+	}()
+
+	// Call 5: GetDashboardsWithMetricName
+	go func() {
+		defer wg.Done()
+		data, err := dashboards.GetDashboardsWithMetricName(ctx, metricName)
+		if err != nil {
+			handleError(err)
+			return
+		}
+		if data != nil {
+			jsonData, marshalErr := json.Marshal(data)
+			if marshalErr != nil {
+				zap.L().Error("error marshalling dashboard data", zap.Error(marshalErr))
+				return
+			}
+
+			// Unmarshal the JSON directly into a slice of Dashboard structs
+			var dashboardsList []metrics_explorer.Dashboard
+			if unmarshalErr := json.Unmarshal(jsonData, &dashboardsList); unmarshalErr != nil {
+				zap.L().Error("error unmarshalling dashboard data", zap.Error(unmarshalErr))
+				return
+			}
+			metricDetailsDTO.Dashboards = dashboardsList
+		}
+	}()
+
+	//TODO: ADD ALERTS CONFIG
+
+	// Wait for all goroutines to complete
+	wg.Wait()
+	close(errCh)
+
+	// If an error occurred, return immediately
+	if apiErr := <-errCh; apiErr != nil {
+		return metrics_explorer.MetricDetailsDTO{}, apiErr
+	}
+
+	return metricDetailsDTO, nil
+}
diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go
index afb1e9f7288..8fa457cbe73 100644
--- a/pkg/query-service/interfaces/interface.go
+++ b/pkg/query-service/interfaces/interface.go
@@ -4,6 +4,8 @@ import (
 	"context"
 	"time"
 
+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/stats"
@@ -115,6 +117,17 @@ type Reader interface {
 	//trace
 	GetTraceFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
 	UpdateTraceField(ctx context.Context, field *model.UpdateField) *model.ApiError
+
+	GetMetricsCardinalityPercentage(ctx context.Context, request *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.CardinalityTreemap, *model.ApiError)
+	GetMetricsDataPointsPercentage(ctx context.Context, req *metrics_explorer.TreeMapMetricsRequest) (*[]metrics_explorer.DataPointTreemap, *model.ApiError)
+	GetMetricsDataPointsAndLastReceived(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError)
+	GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, uint64, uint64, *model.ApiError)
+	GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError)
+	GetAttributesForMetricName(ctx context.Context, metricName string) (*[]metrics_explorer.Attribute, *model.ApiError)
+	GetAllMetricFilterAttributeValues(ctx context.Context, req *metrics_explorer.FilterValueRequest) (*[]v3.AttributeKey, *model.ApiError)
+	GetAllMetricFilterUnits(ctx context.Context, req *metrics_explorer.FilterValueRequest) (*[]v3.AttributeKey, *model.ApiError)
+	GetAllMetricFilterAttributeKeys(ctx context.Context, req *metrics_explorer.FilterKeyRequest, skipDotNames bool) (*[]v3.AttributeKey, *model.ApiError)
+	ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError)
 }
 
 type Querier interface {
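`GetMetricsSummary` hand-rolls the fan-out with a `WaitGroup`, an error channel, and explicit `cancel()`. An alternative shape for the same pattern (not what this PR uses) is `golang.org/x/sync/errgroup`, which cancels the shared context on the first error and returns it from `Wait`:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// fetchSummary sketches the fan-out with errgroup: the group context is
// cancelled on the first non-nil error, replacing the manual
// WaitGroup + error channel + cancel wiring.
func fetchSummary(ctx context.Context) error {
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error { _ = ctx; return nil }) // fetch metadata with ctx
	g.Go(func() error { return nil })          // fetch data points
	g.Go(func() error { return nil })          // fetch time series counts
	return g.Wait() // first error, after all goroutines finish
}

func main() {
	fmt.Println(fetchSummary(context.Background()))
}
```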
diff --git a/pkg/query-service/model/metrics_explorer/summary.go b/pkg/query-service/model/metrics_explorer/summary.go
new file mode 100644
index 00000000000..b32284f0da7
--- /dev/null
+++ b/pkg/query-service/model/metrics_explorer/summary.go
@@ -0,0 +1,159 @@
+package metrics_explorer
+
+import (
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+)
+
+type SummaryListMetricsRequest struct {
+	Offset    int              `json:"offset"`
+	Limit     int              `json:"limit"`
+	OrderBy   []v3.OrderBy     `json:"orderBy"`
+	StartDate string           `json:"startDate"`
+	EndDate   string           `json:"endDate"`
+	Filters   SummaryFilterSet `json:"filters"`
+}
+
+type SummaryFilterItems struct {
+	v3.FilterItem
+	FilterTypeKey FilterTypeKey `json:"filterTypeKey"`
+}
+
+type SummaryFilterSet struct {
+	v3.FilterSet
+	Items []SummaryFilterItems `json:"items"`
+}
+
+type TreeMapType string
+
+const (
+	CardinalityTreeMap TreeMapType = "cardinality"
+	DataPointsTreeMap  TreeMapType = "datapoints"
+)
+
+type TreeMapMetricsRequest struct {
+	Offset    int              `json:"offset"`
+	Limit     int              `json:"limit"`
+	Treemap   TreeMapType      `json:"treemap"`
+	OrderBy   []v3.OrderBy     `json:"orderBy"`
+	StartDate string           `json:"startDate"`
+	EndDate   string           `json:"endDate"`
+	Filters   SummaryFilterSet `json:"filters"`
+}
+
+type MetricDetail struct {
+	MetricName   string `json:"metric_name"`
+	Description  string `json:"description"`
+	Type         string `json:"type"`
+	Unit         string `json:"unit"`
+	Cardinality  uint64 `json:"cardinality"`
+	DataPoints   uint64 `json:"dataPoints"`
+	LastReceived int64  `json:"lastReceived"`
+}
+
+type CardinalityTreemap struct {
+	RelativePercentage float64 `json:"relative_percentage"`
+	TotalValue         uint64  `json:"total_value"`
+	MetricName         string  `json:"metric_name"`
+}
+
+type DataPointTreemap struct {
+	RelativePercentage float64 `json:"relative_percentage"`
+	TotalValue         uint64  `json:"total_value"`
+	MetricName         string  `json:"metric_name"`
+}
+
+type TreeMap struct {
+	Cardinality []CardinalityTreemap `json:"cardinality"`
+	DataPoints  []DataPointTreemap   `json:"dataPoints"`
+}
+
+type SummaryListMetricsResponse struct {
+	Metrics []MetricDetail `json:"metrics"`
+	Total   uint64         `json:"total"`
+}
+
+type Attribute struct {
+	Key          string   `json:"key" db:"key"`
+	Value        []string `json:"value" db:"value"`
+	Contribution float64  `json:"contribution" db:"contribution"`
+}
+
+// Metadata holds additional information about the metric.
+type Metadata struct {
+	MetricType  string `json:"metric_type"`
+	Description string `json:"description"`
+	Unit        string `json:"unit"`
+}
+
+// Alert represents an individual alert associated with the metric.
+type Alert struct {
+	AlertName string `json:"alert_name"`
+	AlertID   string `json:"alert_id"`
+}
+
+// Dashboard represents an individual dashboard associated with the metric.
+type Dashboard struct {
+	DashboardName string `json:"dashboard_name"`
+	DashboardID   string `json:"dashboard_id"`
+	WidgetID      string `json:"widget_id"`
+	WidgetName    string `json:"widget_name"`
+}
+
+type MetricDetailsDTO struct {
+	Name             string      `json:"name"`
+	Description      string      `json:"description"`
+	Type             string      `json:"type"`
+	Unit             string      `json:"unit"`
+	Cardinality      uint64      `json:"cardinality"`
+	DataPoints       uint64      `json:"dataPoints"`
+	TimeSeriesTotal  uint64      `json:"timeSeriesTotal"`
+	TimeSeriesActive uint64      `json:"timeSeriesActive"`
+	LastReceived     uint64      `json:"lastReceived"`
+	Attributes       []Attribute `json:"attributes"`
+	Metadata         Metadata    `json:"metadata"`
+	Alerts           []Alert     `json:"alerts"`
+	Dashboards       []Dashboard `json:"dashboards"`
+}
+
+type FilterKeyRequest struct {
+	SearchText string `json:"searchText"`
+	Limit      int    `json:"limit"`
+}
+
+type FilterValueRequest struct {
+	FilterAttributeKey         string                  `json:"filterAttributeKey"`
+	FilterAttributeKeyDataType v3.AttributeKeyDataType `json:"filterAttributeKeyDataType"`
+	FilterTypeKey              FilterTypeKey           `json:"filterTypeKey"`
+	SearchText                 string                  `json:"searchText"`
+	Limit                      int                     `json:"limit"`
+}
+
+type FilterValueResponse struct {
+	FilterValues []v3.AttributeKey `json:"filterValues"`
+}
+
+type FilterKeyResponse struct {
+	MetricColumns []string          `json:"metricColumns"`
+	AttributeKeys []v3.AttributeKey `json:"attributeKeys"`
+}
+
+type FilterTypeKey string
+
+const (
+	FilterKeyMetricName FilterTypeKey = "metric_name"
+	FilterKeyType       FilterTypeKey = "type"
+	FilterKeyAttributes FilterTypeKey = "attributes"
+	FilterKeyUnit       FilterTypeKey = "unit"
+)
+
+var AvailableColumnFilter = []string{
+	string(FilterKeyMetricName),
+	string(FilterKeyType),
+	string(FilterKeyUnit),
+}
+
+var AvailableColumnFilterMap = map[FilterTypeKey]bool{
+	FilterKeyMetricName: true,
+	FilterKeyType:       true,
+	FilterKeyUnit:       true,
+}
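For completeness, an example body for `POST /api/v1/metrics/treemap` matching the `TreeMapMetricsRequest` JSON tags above; `treemap` selects either `"cardinality"` or `"datapoints"`, and the dates are illustrative:

```go
package main

import "fmt"

func main() {
	body := `{
		"treemap": "cardinality",
		"offset": 0,
		"limit": 20,
		"startDate": "2025-01-01T00:00:00Z",
		"endDate": "2025-01-02T00:00:00Z",
		"filters": {"items": []}
	}`
	fmt.Println(body)
}
```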
diff --git a/pkg/query-service/utils/filter_conditions.go b/pkg/query-service/utils/filter_conditions.go
new file mode 100644
index 00000000000..9cceb0fc624
--- /dev/null
+++ b/pkg/query-service/utils/filter_conditions.go
@@ -0,0 +1,96 @@
+package utils
+
+import (
+	"fmt"
+	"strings"
+
+	"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+)
+
+// skipKey is an optional parameter to skip processing of a specific key
+func BuildFilterConditions(fs *metrics_explorer.SummaryFilterSet, skipKey string) ([]string, error) {
+	if fs == nil || len(fs.Items) == 0 {
+		return nil, nil
+	}
+
+	var conditions []string
+
+	for _, item := range fs.Items {
+		if skipKey != "" && item.Key.Key == skipKey {
+			continue
+		}
+
+		toFormat := item.Value
+		op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
+		if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
+			toFormat = fmt.Sprintf("%%%s%%", toFormat)
+		}
+		fmtVal := ClickHouseFormattedValue(toFormat)
+
+		// Determine if the key is a JSON key or a normal column
+		isJSONKey := false
+		switch item.FilterTypeKey {
+		case metrics_explorer.FilterKeyAttributes:
+			isJSONKey = true // Attributes are stored as JSON labels
+		case metrics_explorer.FilterKeyMetricName:
+			isJSONKey = false // Metric names are normal columns
+		case metrics_explorer.FilterKeyUnit:
+			isJSONKey = false // Units are normal columns
+		}
+
+		condition, err := buildSingleFilterCondition(item.Key.Key, op, fmtVal, isJSONKey)
+		if err != nil {
+			return nil, err
+		}
+		conditions = append(conditions, condition)
+	}
+
+	return conditions, nil
+}
+
+func buildSingleFilterCondition(key string, op v3.FilterOperator, fmtVal string, isJSONKey bool) (string, error) {
+	var keyCondition string
+	if isJSONKey {
+		keyCondition = fmt.Sprintf("JSONExtractString(labels, '%s')", key)
+	} else {
+		keyCondition = key // Normal column access
+	}
+
+	switch op {
+	case v3.FilterOperatorEqual:
+		return fmt.Sprintf("%s = %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorNotEqual:
+		return fmt.Sprintf("%s != %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorIn:
+		return fmt.Sprintf("%s IN %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorNotIn:
+		return fmt.Sprintf("%s NOT IN %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorLike:
+		return fmt.Sprintf("like(%s, %s)", keyCondition, fmtVal), nil
+	case v3.FilterOperatorNotLike:
+		return fmt.Sprintf("notLike(%s, %s)", keyCondition, fmtVal), nil
+	case v3.FilterOperatorRegex:
+		return fmt.Sprintf("match(%s, %s)", keyCondition, fmtVal), nil
+	case v3.FilterOperatorNotRegex:
+		return fmt.Sprintf("not match(%s, %s)", keyCondition, fmtVal), nil
+	case v3.FilterOperatorGreaterThan:
+		return fmt.Sprintf("%s > %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorGreaterThanOrEq:
+		return fmt.Sprintf("%s >= %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorLessThan:
+		return fmt.Sprintf("%s < %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorLessThanOrEq:
+		return fmt.Sprintf("%s <= %s", keyCondition, fmtVal), nil
+	case v3.FilterOperatorContains:
+		return fmt.Sprintf("like(%s, %s)", keyCondition, fmtVal), nil
+	case v3.FilterOperatorNotContains:
+		return fmt.Sprintf("notLike(%s, %s)", keyCondition, fmtVal), nil
+	case v3.FilterOperatorExists:
+		return fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", key), nil
+	case v3.FilterOperatorNotExists:
+		return fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", key), nil
+	default:
+		return "", fmt.Errorf("unsupported filter operator: %s", op)
+	}
+}
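To make the output concrete, here is the kind of SQL fragment `BuildFilterConditions` emits: an attribute filter becomes a `JSONExtractString(labels, ...)` predicate, while a column filter compares the column directly. The key and values below are illustrative:

```go
package main

import "fmt"

func main() {
	// Attribute filter (isJSONKey = true), equal operator:
	attr := fmt.Sprintf("JSONExtractString(labels, '%s') = %s", "service_name", "'frontend'")
	// Column filter (isJSONKey = false), contains operator:
	col := fmt.Sprintf("like(%s, %s)", "metric_name", "'%latency%'")
	fmt.Println(attr) // JSONExtractString(labels, 'service_name') = 'frontend'
	fmt.Println(col)  // like(metric_name, '%latency%')
}
```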