Optimize overview (#23)
* chore: splitting overview info request

* fix: no index health metric with agent mode

* feat: add query param `timeout` for overview info api

* fix: index overview info format
silenceqi authored Dec 11, 2024
1 parent ccd44c9 commit 217008f
Showing 6 changed files with 243 additions and 812 deletions.
modules/elastic/api/cluster_overview.go (28 changes: 24 additions & 4 deletions)
@@ -50,6 +50,8 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 		h.WriteJSON(w, util.MapStr{}, http.StatusOK)
 		return
 	}
+	//only query the first cluster info
+	clusterIDs = clusterIDs[0:1]
 
 	cids := make([]interface{}, 0, len(clusterIDs))
 	for _, clusterID := range clusterIDs {
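Note on the two added lines: trimming to `clusterIDs[0:1]` is only safe because the empty-list case has already returned just above it. A minimal, self-contained sketch of that pattern (names are illustrative, not from this commit):

```go
// Minimal illustration of the restriction above: after the empty check has
// returned early, ids[0:1] is always a valid one-element slice.
package main

import "fmt"

func firstOnly(ids []string) []string {
	if len(ids) == 0 {
		return nil // the handler writes an empty response and returns in this case
	}
	return ids[0:1]
}

func main() {
	fmt.Println(firstOnly([]string{"c1", "c2", "c3"})) // [c1]
	fmt.Println(firstOnly(nil))                        // []
}
```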
@@ -62,7 +64,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 	q1.Conds = orm.And(
 		orm.Eq("metadata.category", "elasticsearch"),
 		orm.Eq("metadata.name", "cluster_stats"),
-		orm.In("metadata.labels.cluster_id", cids),
+		orm.Eq("metadata.labels.cluster_id", cids[0]),
 	)
 	q1.Collapse("metadata.labels.cluster_id")
 	q1.AddSort("timestamp", orm.DESC)
@@ -173,8 +175,8 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 			"bool": util.MapStr{
 				"must": []util.MapStr{
 					{
-						"terms": util.MapStr{
-							"metadata.labels.cluster_uuid": clusterUUIDs,
+						"term": util.MapStr{
+							"metadata.labels.cluster_uuid": clusterUUIDs[0],
 						},
 					},
 					{
@@ -252,7 +254,16 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
 			},
 		},
 	}
-	indexMetrics := h.getMetrics(context.Background(), query, indexMetricItems, bucketSize)
+	timeout := h.GetParameterOrDefault(req, "timeout", "60s")
+	du, err := time.ParseDuration(timeout)
+	if err != nil {
+		log.Error(err)
+		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), du)
+	defer cancel()
+	indexMetrics := h.getMetrics(ctx, query, indexMetricItems, bucketSize)
 	indexingMetricData := util.MapStr{}
 	for _, line := range indexMetrics["cluster_indexing"].Lines {
 		// remove first metric dot
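The new `timeout` handling is the standard `time.ParseDuration` plus `context.WithTimeout` pattern, with the resulting deadline bounding the downstream metrics query. A self-contained sketch of the same idea using only plain `net/http` (the `GetParameterOrDefault` helper and `getMetrics` call are specific to this codebase and not reproduced here):

```go
// Sketch of the timeout plumbing added above: read a duration string from the
// query (defaulting to 60s), parse it, and derive a bounded context from it.
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func withQueryTimeout(req *http.Request, def string) (context.Context, context.CancelFunc, error) {
	timeout := req.URL.Query().Get("timeout")
	if timeout == "" {
		timeout = def
	}
	du, err := time.ParseDuration(timeout)
	if err != nil {
		return nil, nil, err // the handler maps this to a 500 response
	}
	ctx, cancel := context.WithTimeout(context.Background(), du)
	return ctx, cancel, nil
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "/overview?timeout=30s", nil)
	ctx, cancel, err := withQueryTimeout(req, "60s")
	if err != nil {
		panic(err)
	}
	defer cancel()
	deadline, _ := ctx.Deadline()
	fmt.Println("query must finish within:", time.Until(deadline).Round(time.Second))
}
```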
@@ -716,6 +727,9 @@ func (h *APIHandler) GetRealtimeClusterNodes(w http.ResponseWriter, req *http.Re
 			info.IndexQPS = qps[nodeInfo.Id]["index"]
 			info.QueryQPS = qps[nodeInfo.Id]["query"]
 			info.IndexBytesQPS = qps[nodeInfo.Id]["index_bytes"]
+			if v, ok := qps[nodeInfo.Id]["latest_timestamp"].(float64); ok {
+				info.Timestamp = uint64(v)
+			}
 		}
 		nodeInfos = append(nodeInfos, info)
 	}
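The float64 assertion above matches how `encoding/json` decodes numbers into `interface{}` values, which is presumably how the aggregation result reaches the qps map. A small sketch of that decode-and-convert step (the payload shape is illustrative):

```go
// JSON numbers land as float64 when decoded into interface{}, so an epoch-millis
// timestamp from an aggregation bucket must be type-asserted and converted.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"latest_timestamp": {"value": 1733875200000}}`)
	var bucket map[string]interface{}
	if err := json.Unmarshal(raw, &bucket); err != nil {
		panic(err)
	}

	var ts uint64
	if agg, ok := bucket["latest_timestamp"].(map[string]interface{}); ok {
		if v, ok := agg["value"].(float64); ok {
			ts = uint64(v) // exact for values well below 2^53
		}
	}
	fmt.Println(ts) // 1733875200000
}
```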
@@ -826,6 +840,7 @@ type RealtimeNodeInfo struct {
 	IndexQPS      interface{} `json:"index_qps"`
 	QueryQPS      interface{} `json:"query_qps"`
 	IndexBytesQPS interface{} `json:"index_bytes_qps"`
+	Timestamp     uint64      `json:"timestamp"`
 	CatNodeResponse
 }
 
@@ -1122,6 +1137,11 @@ func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[
 						"buckets_path": "query_total",
 					},
 				},
+				"latest_timestamp": util.MapStr{
+					"max": util.MapStr{
+						"field": "timestamp",
+					},
+				},
 			},
 		},
 	},
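The `latest_timestamp` entry added above is a plain Elasticsearch `max` aggregation on the `timestamp` field. A sketch of the JSON it serializes to, using `map[string]interface{}` in place of util.MapStr:

```go
// Shows the request-body JSON produced by the sub-aggregation added above:
// a max aggregation over the timestamp field.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	agg := map[string]interface{}{
		"latest_timestamp": map[string]interface{}{
			"max": map[string]interface{}{
				"field": "timestamp",
			},
		},
	}
	out, _ := json.MarshalIndent(agg, "", "  ")
	fmt.Println(string(out))
}
```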
