This repository was archived by the owner on Jul 6, 2021. It is now read-only.

Commit 6f41a4a

feat: Rework Conclusions and Recommendations in H002, H004

Parent: 2ccd79e

File tree: 11 files changed (+458, -5 lines)

pghrep/src/checkup/h002/h002.go

Lines changed: 88 additions & 0 deletions
package h002

import (
    "encoding/json"

    checkup ".."
)

const H002_UNUSED_INDEXES_FOUND_P2 string = "H002_UNUSED_INDEXES_FOUND_P2"
const H002_UNUSED_INDEXES_FOUND_P3 string = "H002_UNUSED_INDEXES_FOUND_P3"
const H002_UNUSED_INDEXES_FOUND string = "H002_UNUSED_INDEXES_FOUND"
const H002_UNUSED_INDEXES_FOUND_DO string = "H002_UNUSED_INDEXES_FOUND_DO"
const H002_UNUSED_INDEXES_FOUND_UNDO string = "H002_UNUSED_INDEXES_FOUND_UNDO"

const UNUSED_INDEXES_CRITICL_SIZE_PERCENT float64 = 5.0

func H002Process(report H002Report) (checkup.ReportResult, error) {
    var result checkup.ReportResult
    var masterHost = ""

    for host, hostData := range report.LastNodesJson.Hosts {
        if hostData.Role == "master" {
            masterHost = host
            break
        }
    }

    for host, hostData := range report.Results {
        if host != masterHost {
            continue
        }

        if len(hostData.Data.NeverUsedIndexes) > 0 && len(hostData.Data.Do) > 0 &&
            len(hostData.Data.UnDo) > 0 {
            if (float64(hostData.Data.NeverUsedIndexesTotal.IndexSizeBytesSum) /
                float64(hostData.Data.DatabaseStat.DatabaseSizeBytes) * 100) > UNUSED_INDEXES_CRITICL_SIZE_PERCENT {
                result.P2 = true
                result.AppendConclusion(H002_UNUSED_INDEXES_FOUND_P2, MSG_UNUSED_INDEXES_FOUND_P2_CONCLUSION,
                    len(hostData.Data.NeverUsedIndexes), UNUSED_INDEXES_CRITICL_SIZE_PERCENT)
            } else {
                result.P3 = true
                result.AppendConclusion(H002_UNUSED_INDEXES_FOUND_P3, MSG_UNUSED_INDEXES_FOUND_P3_CONCLUSION,
                    len(hostData.Data.NeverUsedIndexes))
            }

            var p = "[P3] "
            if result.P2 {
                p = "[P2] "
            }
            result.AppendRecommendation(H002_UNUSED_INDEXES_FOUND, p+MSG_UNUSED_INDEXES_FOUND_R1)
            result.AppendRecommendation(H002_UNUSED_INDEXES_FOUND, MSG_UNUSED_INDEXES_FOUND_R2)
            result.AppendRecommendation(H002_UNUSED_INDEXES_FOUND, MSG_UNUSED_INDEXES_FOUND_R3)

            var doCode = "``` \n"
            for _, doIndex := range hostData.Data.Do {
                doCode = doCode + doIndex + " \n"
            }
            doCode = doCode + "```"
            result.AppendRecommendation(H002_UNUSED_INDEXES_FOUND_DO, MSG_UNUSED_INDEXES_FOUND_DO, doCode)

            var undoCode = "``` \n"
            for _, undoIndex := range hostData.Data.UnDo {
                undoCode = undoCode + undoIndex + " \n"
            }
            undoCode = undoCode + "```"
            result.AppendRecommendation(H002_UNUSED_INDEXES_FOUND_UNDO, MSG_UNUSED_INDEXES_FOUND_UNDO, undoCode)
        }

    }

    return result, nil
}

func H002PreprocessReportData(data map[string]interface{}) {
    var report H002Report
    filePath := data["source_path_full"].(string)
    jsonRaw := checkup.LoadRawJsonReport(filePath)

    if !checkup.CheckUnmarshalResult(json.Unmarshal(jsonRaw, &report)) {
        return
    }

    result, err := H002Process(report)

    if err == nil {
        checkup.SaveReportResult(data, result)
    }
}
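The core of H002Process above is a size-threshold classification: if the total size of the never-used indexes exceeds 5% of the database size, the finding is escalated from P3 to P2. A minimal standalone sketch of that arithmetic, outside the checkup package and with made-up byte counts:

```go
package main

import "fmt"

// Mirrors UNUSED_INDEXES_CRITICL_SIZE_PERCENT from h002.go.
const criticalSizePercent = 5.0

// classify returns "P2" when the unused indexes take up more than the
// critical share of the database, and "P3" otherwise.
func classify(indexSizeBytesSum, databaseSizeBytes int64) string {
	pct := float64(indexSizeBytesSum) / float64(databaseSizeBytes) * 100
	if pct > criticalSizePercent {
		return "P2"
	}
	return "P3"
}

func main() {
	// Hypothetical numbers: 6 GiB of never-used indexes in a 100 GiB database.
	fmt.Println(classify(6<<30, 100<<30)) // P2 (6% > 5%)
	fmt.Println(classify(2<<30, 100<<30)) // P3 (2% <= 5%)
}
```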
Lines changed: 22 additions & 0 deletions
package h002

const MSG_UNUSED_INDEXES_FOUND_P2_CONCLUSION string = "[P2] %d unused index(es) have been found and their total size " +
    "exceeds %.2f%% of the database size."
const MSG_UNUSED_INDEXES_FOUND_P3_CONCLUSION string = "[P3] %d unused index(es) have been found."

const MSG_UNUSED_INDEXES_FOUND_R1 string = "Use the database migration provided below to drop the unused indexes. " +
    "Keep in mind that under load, it is recommended to use `DROP INDEX CONCURRENTLY` (and `CREATE INDEX CONCURRENTLY` " +
    "if reverting is needed) to avoid blocking issues."
const MSG_UNUSED_INDEXES_FOUND_R2 string = "Be careful dropping the indexes. If you have multiple setups of your " +
    "software, the analysis of just a single setup might not be enough. Some indexes might be used (and therefore needed) " +
    "only on a limited number of setups. Also, in some cases, developers prepare indexes for new features in advance – " +
    "in such cases, dropping those indexes is not a good idea."
const MSG_UNUSED_INDEXES_FOUND_R3 string = "If there are any doubts, consider a more careful approach: before actually " +
    "dropping the indexes, disable the ones listed in this report. For this, use queries like `UPDATE pg_index SET indisvalid = false " +
    "WHERE indexrelid::regclass = (select oid from pg_class where relname = 'u_users_email');`. The indexes will " +
    "continue to receive updates. In case of performance degradation, re-enable the corresponding indexes by " +
    "setting `indisvalid` back to `true`. If everything looks fine after a significant period of observation, " +
    "proceed with `DROP INDEX CONCURRENTLY`."

const MSG_UNUSED_INDEXES_FOUND_DO string = "\"DO\" database migrations \n%s"
const MSG_UNUSED_INDEXES_FOUND_UNDO string = "\"UNDO\" database migrations \n%s"
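These conclusion constants are printf-style templates that H002Process fills via AppendConclusion. A small standalone sketch of how the rendered messages would read (the format strings are copied from the constants above; the counts are hypothetical):

```go
package main

import "fmt"

func main() {
	// Copies of MSG_UNUSED_INDEXES_FOUND_P2_CONCLUSION / _P3_CONCLUSION above.
	p2 := "[P2] %d unused index(es) have been found and their total size " +
		"exceeds %.2f%% of the database size."
	p3 := "[P3] %d unused index(es) have been found."

	// Hypothetical counts, matching the arguments passed by H002Process.
	fmt.Printf(p2+"\n", 12, 5.0) // "... exceeds 5.00% of the database size."
	fmt.Printf(p3+"\n", 3)
}
```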
Lines changed: 72 additions & 0 deletions
package h002

import checkup ".."

type H002Index struct {
    Num                  int     `json:"num"`
    Reason               string  `json:"reason"`
    IndexId              string  `json:"index_id"`
    SchemaName           string  `json:"schema_name"`
    TableName            string  `json:"table_name"`
    IndexName            string  `json:"index_name"`
    IdxScan              int64   `json:"idx_scan"`
    AllScans             int64   `json:"all_scans"`
    IndexScanPct         float64 `json:"index_scan_pct"`
    Writes               int64   `json:"writes"`
    ScansPerWrite        float64 `json:"scans_per_write"`
    IndexSizeBytes       int64   `json:"index_size_bytes"`
    TableSizeBytes       int64   `json:"table_size_bytes"`
    Relpages             int64   `json:"relpages"`
    IdxIsBtree           bool    `json:"idx_is_btree"`
    IndexDef             string  `json:"index_def"`
    FormatedIndexName    string  `json:"formated_index_name"`
    FormatedSchemaName   string  `json:"formated_schema_name"`
    FormatedTableName    string  `json:"formated_table_name"`
    FormatedRelationName string  `json:"formated_relation_name"`
    Opclasses            string  `json:"opclasses"`
    SupportsFk           bool    `json:"supports_fk"`
    Grp                  int64   `json:"grp"`
}

type H002Indexes map[string]H002Index

type H002IndexesTotal struct {
    IndexSizeBytesSum int64 `json:"index_size_bytes_sum"`
    TableSizeBytesSum int64 `json:"table_size_bytes_sum"`
}

type DatabaseStat struct {
    StatsReset        string `json:"stats_reset"`
    StatsAge          string `json:"stats_age"`
    Days              int64  `json:"days"`
    DatabaseSizeBytes int64  `json:"database_size_bytes"`
}

type H002ReportHostResultData struct {
    NeverUsedIndexes       H002Indexes      `json:"never_used_indexes"`
    NeverUsedIndexesTotal  H002IndexesTotal `json:"never_used_indexes_total"`
    RarelyUsedIsndexes     H002Indexes      `json:"rarely_used_indexes"`
    RarelyUsedIndexesTotal H002IndexesTotal `json:"rarely_used_indexes_total"`
    Do                     []string         `json:"do"`
    UnDo                   []string         `json:"undo"`
    DatabaseStat           DatabaseStat     `json:"database_stat"`
    MinIndexSizeBytes      int64            `json:"min_index_size_bytes"`
}

type H002ReportHostResult struct {
    Data      H002ReportHostResultData `json:"data"`
    NodesJson checkup.ReportLastNodes  `json:"nodes.json"`
}

type H002ReportHostsResults map[string]H002ReportHostResult

type H002Report struct {
    Project       string                  `json:"project"`
    Name          string                  `json:"name"`
    CheckId       string                  `json:"checkId"`
    Timestamptz   string                  `json:"timestamptz"`
    Database      string                  `json:"database"`
    Dependencies  map[string]interface{}  `json:"dependencies"`
    LastNodesJson checkup.ReportLastNodes `json:"last_nodes_json"`
    Results       H002ReportHostsResults  `json:"results"`
}
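For orientation, a hypothetical JSON fragment matching the tags above, unmarshaled into a local mirror of just the fields H002Process reads (the real per-host data carries many more fields; the values and statements here are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the H002ReportHostResultData fields used by H002Process.
type hostData struct {
	NeverUsedIndexesTotal struct {
		IndexSizeBytesSum int64 `json:"index_size_bytes_sum"`
	} `json:"never_used_indexes_total"`
	Do           []string `json:"do"`
	UnDo         []string `json:"undo"`
	DatabaseStat struct {
		DatabaseSizeBytes int64 `json:"database_size_bytes"`
	} `json:"database_stat"`
}

func main() {
	// Made-up report fragment: 6 GiB of never-used indexes, 100 GiB database.
	raw := []byte(`{
	  "never_used_indexes_total": {"index_size_bytes_sum": 6442450944},
	  "do":   ["DROP INDEX CONCURRENTLY i_some_unused_index;"],
	  "undo": ["CREATE INDEX CONCURRENTLY i_some_unused_index ON some_table (some_column);"],
	  "database_stat": {"database_size_bytes": 107374182400}
	}`)

	var d hostData
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	fmt.Printf("unused index bytes: %d of %d\n",
		d.NeverUsedIndexesTotal.IndexSizeBytesSum, d.DatabaseStat.DatabaseSizeBytes)
}
```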

pghrep/src/checkup/h004/h004.go

Lines changed: 88 additions & 0 deletions
package h004

import (
    "encoding/json"

    checkup ".."
)

const H004_REDUNDANT_INDEXES_FOUND_P2 string = "H004_REDUNDANT_INDEXES_FOUND_P2"
const H004_REDUNDANT_INDEXES_FOUND_P3 string = "H004_REDUNDANT_INDEXES_FOUND_P3"
const H004_REDUNDANT_INDEXES_FOUND string = "H004_REDUNDANT_INDEXES_FOUND"
const H004_REDUNDANT_INDEXES_FOUND_DO string = "H004_REDUNDANT_INDEXES_FOUND_DO"
const H004_REDUNDANT_INDEXES_FOUND_UNDO string = "H004_REDUNDANT_INDEXES_FOUND_UNDO"

const REDUNDANT_INDEXES_CRITICL_SIZE_PERCENT float64 = 5.0

func H004Process(report H004Report) (checkup.ReportResult, error) {
    var result checkup.ReportResult
    var masterHost = ""

    for host, hostData := range report.LastNodesJson.Hosts {
        if hostData.Role == "master" {
            masterHost = host
            break
        }
    }

    for host, hostData := range report.Results {
        if host != masterHost {
            continue
        }

        if len(hostData.Data.RedundantIndexes) > 0 && len(hostData.Data.Do) > 0 &&
            len(hostData.Data.UnDo) > 0 {
            if (float64(hostData.Data.RedundantIndexesTotal.IndexSizeBytesSum) /
                float64(hostData.Data.DatabaseStat.DatabaseSizeBytes) * 100) > REDUNDANT_INDEXES_CRITICL_SIZE_PERCENT {
                result.P2 = true
                result.AppendConclusion(H004_REDUNDANT_INDEXES_FOUND_P2, MSG_REDUNDANT_INDEXES_FOUND_P2_CONCLUSION,
                    len(hostData.Data.RedundantIndexes), REDUNDANT_INDEXES_CRITICL_SIZE_PERCENT)
            } else {
                result.P3 = true
                result.AppendConclusion(H004_REDUNDANT_INDEXES_FOUND_P3, MSG_REDUNDANT_INDEXES_FOUND_P3_CONCLUSION,
                    len(hostData.Data.RedundantIndexes))
            }

            var p = "[P3] "
            if result.P2 {
                p = "[P2] "
            }
            result.AppendRecommendation(H004_REDUNDANT_INDEXES_FOUND, p+MSG_REDUNDANT_INDEXES_FOUND_R1)
            result.AppendRecommendation(H004_REDUNDANT_INDEXES_FOUND, MSG_REDUNDANT_INDEXES_FOUND_R2)
            result.AppendRecommendation(H004_REDUNDANT_INDEXES_FOUND, MSG_REDUNDANT_INDEXES_FOUND_R3)

            var doCode = "``` \n"
            for _, doIndex := range hostData.Data.Do {
                doCode = doCode + doIndex + " \n"
            }
            doCode = doCode + "```"
            result.AppendRecommendation(H004_REDUNDANT_INDEXES_FOUND_DO, MSG_REDUNDANT_INDEXES_FOUND_DO, doCode)

            var undoCode = "``` \n"
            for _, undoIndex := range hostData.Data.UnDo {
                undoCode = undoCode + undoIndex + " \n"
            }
            undoCode = undoCode + "```"
            result.AppendRecommendation(H004_REDUNDANT_INDEXES_FOUND_UNDO, MSG_REDUNDANT_INDEXES_FOUND_UNDO, undoCode)
        }

    }

    return result, nil
}

func H004PreprocessReportData(data map[string]interface{}) {
    var report H004Report
    filePath := data["source_path_full"].(string)
    jsonRaw := checkup.LoadRawJsonReport(filePath)

    if !checkup.CheckUnmarshalResult(json.Unmarshal(jsonRaw, &report)) {
        return
    }

    result, err := H004Process(report)

    if err == nil {
        checkup.SaveReportResult(data, result)
    }
}
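Both H002Process and H004Process assemble the per-host do/undo migration lists into a fenced code block for the recommendation text. A standalone sketch of that assembly, using strings.Builder instead of repeated string concatenation (the migration statement is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// buildCodeBlock mirrors the doCode/undoCode loops in H002Process/H004Process:
// it wraps each migration statement in a Markdown code fence.
func buildCodeBlock(statements []string) string {
	fence := strings.Repeat("`", 3) // three backticks, built at run time to keep this example fence-safe
	var b strings.Builder
	b.WriteString(fence + " \n")
	for _, s := range statements {
		b.WriteString(s + " \n")
	}
	b.WriteString(fence)
	return b.String()
}

func main() {
	do := []string{
		"DROP INDEX CONCURRENTLY i_redundant_index;", // hypothetical statement
	}
	fmt.Println(buildCodeBlock(do))
}
```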
Lines changed: 22 additions & 0 deletions
package h004

const MSG_REDUNDANT_INDEXES_FOUND_P2_CONCLUSION string = "[P2] %d redundant index(es) have been found and their total size " +
    "exceeds %.2f%% of the database size."
const MSG_REDUNDANT_INDEXES_FOUND_P3_CONCLUSION string = "[P3] %d redundant index(es) have been found."

const MSG_REDUNDANT_INDEXES_FOUND_R1 string = "Use the database migration provided below to drop the redundant indexes. " +
    "Keep in mind that under load, it is recommended to use `DROP INDEX CONCURRENTLY` (and `CREATE INDEX CONCURRENTLY` " +
    "if reverting is needed) to avoid blocking issues."
const MSG_REDUNDANT_INDEXES_FOUND_R2 string = "Be careful dropping the indexes. If you have multiple setups of your " +
    "software, the analysis of just a single setup might not be enough. Some indexes might be used (and therefore needed) " +
    "only on a limited number of setups. Also, in some cases, developers prepare indexes for new features in advance – " +
    "in such cases, dropping those indexes is not a good idea."
const MSG_REDUNDANT_INDEXES_FOUND_R3 string = "If there are any doubts, consider a more careful approach: before actually " +
    "dropping the indexes, disable the ones listed in this report. For this, use queries like `UPDATE pg_index SET indisvalid = false " +
    "WHERE indexrelid::regclass = (select oid from pg_class where relname = 'u_users_email');`. The indexes will " +
    "continue to receive updates. In case of performance degradation, re-enable the corresponding indexes by " +
    "setting `indisvalid` back to `true`. If everything looks fine after a significant period of observation, " +
    "proceed with `DROP INDEX CONCURRENTLY`."

const MSG_REDUNDANT_INDEXES_FOUND_DO string = "\"DO\" database migrations \n%s"
const MSG_REDUNDANT_INDEXES_FOUND_UNDO string = "\"UNDO\" database migrations \n%s"
Lines changed: 65 additions & 0 deletions
package h004

import checkup ".."

type H004Index struct {
    Num                  int    `json:"num"`
    IndexId              string `json:"index_id"`
    SchemaName           string `json:"schema_name"`
    TableName            string `json:"table_name"`
    TableSizeBytes       int64  `json:"table_size_bytes"`
    IndexName            string `json:"index_name"`
    AccessMethod         string `json:"access_method"`
    Reason               string `json:"reason"`
    MainIndexDef         string `json:"main_index_def"`
    MainIndexSize        string `json:"main_index_size"`
    IndexDef             string `json:"index_def"`
    IndexSizeBytes       int64  `json:"index_size_bytes"`
    IndexUsage           int64  `json:"index_usage"`
    FormatedIndexName    string `json:"formated_index_name"`
    FormatedSchemaName   string `json:"formated_schema_name"`
    FormatedTableName    string `json:"formated_table_name"`
    FormatedRelationName string `json:"formated_relation_name"`
    SupportsFk           bool   `json:"supports_fk"`
}

type H004Indexes map[string]H004Index

type H004IndexesTotal struct {
    IndexSizeBytesSum int64 `json:"index_size_bytes_sum"`
    TableSizeBytesSum int64 `json:"table_size_bytes_sum"`
}

type DatabaseStat struct {
    StatsReset        string `json:"stats_reset"`
    StatsAge          string `json:"stats_age"`
    Days              int64  `json:"days"`
    DatabaseSizeBytes int64  `json:"database_size_bytes"`
}

type H004ReportHostResultData struct {
    RedundantIndexes      H004Indexes      `json:"redundant_indexes"`
    RedundantIndexesTotal H004IndexesTotal `json:"redundant_indexes_total"`
    Do                    []string         `json:"do"`
    UnDo                  []string         `json:"undo"`
    DatabaseStat          DatabaseStat     `json:"database_stat"`
    MinIndexSizeBytes     int64            `json:"min_index_size_bytes"`
}

type H004ReportHostResult struct {
    Data      H004ReportHostResultData `json:"data"`
    NodesJson checkup.ReportLastNodes  `json:"nodes.json"`
}

type H004ReportHostsResults map[string]H004ReportHostResult

type H004Report struct {
    Project       string                  `json:"project"`
    Name          string                  `json:"name"`
    CheckId       string                  `json:"checkId"`
    Timestamptz   string                  `json:"timestamptz"`
    Database      string                  `json:"database"`
    Dependencies  map[string]interface{}  `json:"dependencies"`
    LastNodesJson checkup.ReportLastNodes `json:"last_nodes_json"`
    Results       H004ReportHostsResults  `json:"results"`
}

pghrep/src/main.go

Lines changed: 6 additions & 0 deletions
@@ -34,6 +34,8 @@ import (
     "./checkup/g001"
     "./checkup/g002"
     "./checkup/h001"
+    "./checkup/h002"
+    "./checkup/h004"
     "./checkup/k000"
     "./checkup/l003"

@@ -550,6 +552,10 @@ func preprocessReportData(checkId string, config cfg.Config,
         a008.A008PreprocessReportData(data)
     case "H001":
         h001.H001PreprocessReportData(data)
+    case "H002":
+        h002.H002PreprocessReportData(data)
+    case "H004":
+        h004.H004PreprocessReportData(data)
     case "F001":
         f001.F001PreprocessReportData(data)
     case "F002":