diff --git a/.examples/docker-compose-mysql-storage/config/config.yaml b/.examples/docker-compose-mysql-storage/config/config.yaml
new file mode 100644
index 000000000..a904a9b84
--- /dev/null
+++ b/.examples/docker-compose-mysql-storage/config/config.yaml
@@ -0,0 +1,42 @@
+storage:
+ type: mysql
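+ # parseTime=True is needed so the MySQL driver returns DATETIME columns as Go time.Time values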
+ path: "${MYSQL_USER}:${MYSQL_PASSWORD}@tcp(mysql:3306)/${MYSQL_DATABASE}?charset=utf8mb4&parseTime=True&loc=Local&multiStatements=true&interpolateParams=true"
+
+endpoints:
+ - name: back-end
+ group: core
+ url: "https://example.org/"
+ interval: 5m
+ conditions:
+ - "[STATUS] == 200"
+ - "[CERTIFICATE_EXPIRATION] > 48h"
+
+ - name: monitoring
+ group: internal
+ url: "https://example.org/"
+ interval: 5m
+ conditions:
+ - "[STATUS] == 200"
+
+ - name: nas
+ group: internal
+ url: "https://example.org/"
+ interval: 5m
+ conditions:
+ - "[STATUS] == 200"
+
+ - name: example-dns-query
+ url: "8.8.8.8" # Address of the DNS server to use
+ interval: 5m
+ dns:
+ query-name: "example.com"
+ query-type: "A"
+ conditions:
+ - "[BODY] == 93.184.215.14"
+ - "[DNS_RCODE] == NOERROR"
+
+ - name: icmp-ping
+ url: "icmp://example.org"
+ interval: 1m
+ conditions:
+ - "[CONNECTED] == true"
diff --git a/.examples/docker-compose-mysql-storage/docker-compose.yml b/.examples/docker-compose-mysql-storage/docker-compose.yml
new file mode 100644
index 000000000..fe736ff47
--- /dev/null
+++ b/.examples/docker-compose-mysql-storage/docker-compose.yml
@@ -0,0 +1,34 @@
+version: "3.9"
+services:
+ mysql:
+ image: mysql:lts
+ volumes:
+ - ./data/db:/var/lib/mysql
+ ports:
+ - "3306:3306"
+ environment:
+ - MYSQL_DATABASE=gatus
+ - MYSQL_USER=username
+ - MYSQL_PASSWORD=password
+ - MYSQL_ROOT_PASSWORD=root_password
+ networks:
+ - web
+
+ gatus:
+ image: twinproduction/gatus:latest
+ restart: always
+ ports:
+ - "8080:8080"
+ environment:
+ - MYSQL_USER=username
+ - MYSQL_PASSWORD=password
+ - MYSQL_DATABASE=gatus
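+ # Gatus expands the ${MYSQL_*} placeholders in config/config.yaml from these environment variables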
+ volumes:
+ - ./config:/config
+ networks:
+ - web
+ depends_on:
+ - mysql
+
+networks:
+ web:
diff --git a/README.md b/README.md
index a809ae42c..91b8364eb 100644
--- a/README.md
+++ b/README.md
@@ -382,14 +382,14 @@ Here are some examples of conditions you can use:
### Storage
-| Parameter | Description | Default |
-|:------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------|:-----------|
-| `storage` | Storage configuration | `{}` |
-| `storage.path` | Path to persist the data in. Only supported for types `sqlite` and `postgres`. | `""` |
-| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres`. | `"memory"` |
-| `storage.caching`                   | Whether to use write-through caching. Improves loading time for large dashboards. Only supported if `storage.type` is `sqlite` or `postgres`         | `false`    |
-| `storage.maximum-number-of-results` | The maximum number of results that an endpoint can have | `100` |
-| `storage.maximum-number-of-events` | The maximum number of events that an endpoint can have | `50` |
+| Parameter | Description | Default |
+|:------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------|
+| `storage` | Storage configuration | `{}` |
+| `storage.path` | Path to persist the data in. Only supported for types `sqlite`, `postgres` and `mysql`. | `""` |
+| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres` and `mysql`. | `"memory"` |
+| `storage.caching`                   | Whether to use write-through caching. Improves loading time for large dashboards. Only supported if `storage.type` is `sqlite`, `postgres` or `mysql` | `false`    |
+| `storage.maximum-number-of-results` | The maximum number of results that an endpoint can have | `100` |
+| `storage.maximum-number-of-events` | The maximum number of events that an endpoint can have | `50` |
The results for each endpoint health check as well as the data for uptime and the past events must be persisted
so that they can be displayed on the dashboard. These parameters allow you to configure the storage in question.
@@ -419,6 +419,14 @@ storage:
```
See [examples/docker-compose-postgres-storage](.examples/docker-compose-postgres-storage) for an example.
+- If `storage.type` is `mysql`, `storage.path` must be the connection URL:
+```yaml
+storage:
+ type: mysql
+ path: "user:password@tcp(127.0.0.1:3306)/gatus?charset=utf8mb4&parseTime=True&loc=Local&multiStatements=true&interpolateParams=true"
+```
+See [examples/docker-compose-mysql-storage](.examples/docker-compose-mysql-storage) for an example.
+
### Client configuration
In order to support a wide range of environments, each monitored endpoint has a unique configuration for
diff --git a/go.mod b/go.mod
index a1731467d..1991be5a9 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
github.com/TwiN/whois v1.1.10
github.com/aws/aws-sdk-go v1.55.6
github.com/coreos/go-oidc/v3 v3.14.1
+ github.com/go-sql-driver/mysql v1.9.2
github.com/gofiber/fiber/v2 v2.52.6
github.com/google/go-github/v48 v48.2.0
github.com/google/uuid v1.6.0
@@ -35,6 +36,7 @@ require (
cloud.google.com/go/auth v0.15.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
+ filippo.io/edwards25519 v1.1.0 // indirect
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
diff --git a/go.sum b/go.sum
index 27b0ef162..cd648a8fc 100644
--- a/go.sum
+++ b/go.sum
@@ -6,6 +6,8 @@ cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
code.gitea.io/sdk/gitea v0.19.0 h1:8I6s1s4RHgzxiPHhOQdgim1RWIRcr0LVMbHBjBFXq4Y=
code.gitea.io/sdk/gitea v0.19.0/go.mod h1:IG9xZJoltDNeDSW0qiF2Vqx5orMWa7OhVWrjvrd5NpI=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/TwiN/deepmerge v0.2.2 h1:FUG9QMIYg/j2aQyPPhA3XTFJwXSNHI/swaR4Lbyxwg4=
github.com/TwiN/deepmerge v0.2.2/go.mod h1:4OHvjV3pPNJCJZBHswYAwk6rxiD8h8YZ+9cPo7nu4oI=
github.com/TwiN/g8/v2 v2.0.0 h1:+hwIbRLMhDd2iwHzkZUPp2FkX7yTx8ddYOnS91HkDqQ=
@@ -46,6 +48,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU=
+github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
diff --git a/storage/config.go b/storage/config.go
index a707d5ef4..1fae9378f 100644
--- a/storage/config.go
+++ b/storage/config.go
@@ -43,7 +43,7 @@ func (c *Config) ValidateAndSetDefaults() error {
if c.Type == "" {
c.Type = TypeMemory
}
- if (c.Type == TypePostgres || c.Type == TypeSQLite) && len(c.Path) == 0 {
+ if (c.Type == TypeMySQL || c.Type == TypePostgres || c.Type == TypeSQLite) && len(c.Path) == 0 {
return ErrSQLStorageRequiresPath
}
if c.Type == TypeMemory && len(c.Path) > 0 {
diff --git a/storage/store/sql/specific_mysql.go b/storage/store/sql/specific_mysql.go
new file mode 100644
index 000000000..3a0284dc0
--- /dev/null
+++ b/storage/store/sql/specific_mysql.go
@@ -0,0 +1,91 @@
+package sql
+
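+// createMySQLSchema creates the tables required by the store using MySQL-compatible DDL.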
+func (s *Store) createMySQLSchema() error {
+ _, err := s.db.Exec(`
+ CREATE TABLE IF NOT EXISTS endpoints (
+ endpoint_id BIGINT AUTO_INCREMENT PRIMARY KEY,
+ endpoint_key VARCHAR(255) UNIQUE,
+ endpoint_name VARCHAR(255) NOT NULL,
+ endpoint_group VARCHAR(255) NOT NULL,
+ UNIQUE(endpoint_name, endpoint_group)
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ _, err = s.db.Exec(`
+ CREATE TABLE IF NOT EXISTS endpoint_events (
+ endpoint_event_id BIGINT AUTO_INCREMENT PRIMARY KEY,
+ endpoint_id BIGINT NOT NULL,
+ event_type VARCHAR(255) NOT NULL,
+ event_timestamp DATETIME NOT NULL,
+ FOREIGN KEY (endpoint_id) REFERENCES endpoints(endpoint_id) ON DELETE CASCADE
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ _, err = s.db.Exec(`
+ CREATE TABLE IF NOT EXISTS endpoint_results (
+ endpoint_result_id BIGINT AUTO_INCREMENT PRIMARY KEY,
+ endpoint_id BIGINT NOT NULL,
+ success BOOLEAN NOT NULL,
+ errors TEXT NOT NULL,
+ connected BOOLEAN NOT NULL,
+ status INT NOT NULL,
+ dns_rcode VARCHAR(255) NOT NULL,
+ certificate_expiration BIGINT NOT NULL,
+ domain_expiration BIGINT NOT NULL,
+ hostname VARCHAR(255) NOT NULL,
+ ip VARCHAR(255) NOT NULL,
+ duration BIGINT NOT NULL,
+ timestamp DATETIME NOT NULL,
+ FOREIGN KEY (endpoint_id) REFERENCES endpoints(endpoint_id) ON DELETE CASCADE
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ _, err = s.db.Exec(`
+ CREATE TABLE IF NOT EXISTS endpoint_result_conditions (
+ endpoint_result_condition_id BIGINT AUTO_INCREMENT PRIMARY KEY,
+ endpoint_result_id BIGINT NOT NULL,
+ ` + "`condition`" + ` TEXT NOT NULL,
+ success BOOLEAN NOT NULL,
+ FOREIGN KEY (endpoint_result_id) REFERENCES endpoint_results(endpoint_result_id) ON DELETE CASCADE
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ _, err = s.db.Exec(`
+ CREATE TABLE IF NOT EXISTS endpoint_uptimes (
+ endpoint_uptime_id BIGINT AUTO_INCREMENT PRIMARY KEY,
+ endpoint_id BIGINT NOT NULL,
+ hour_unix_timestamp BIGINT NOT NULL,
+ total_executions BIGINT NOT NULL,
+ successful_executions BIGINT NOT NULL,
+ total_response_time BIGINT NOT NULL,
+ UNIQUE(endpoint_id, hour_unix_timestamp),
+ FOREIGN KEY (endpoint_id) REFERENCES endpoints(endpoint_id) ON DELETE CASCADE
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ _, err = s.db.Exec(`
+ CREATE TABLE IF NOT EXISTS endpoint_alerts_triggered (
+ endpoint_alert_trigger_id BIGINT AUTO_INCREMENT PRIMARY KEY,
+ endpoint_id BIGINT NOT NULL,
+ configuration_checksum VARCHAR(255) NOT NULL,
+ resolve_key VARCHAR(255) NOT NULL,
+ number_of_successes_in_a_row INT NOT NULL,
+ UNIQUE(endpoint_id, configuration_checksum),
+ FOREIGN KEY (endpoint_id) REFERENCES endpoints(endpoint_id) ON DELETE CASCADE
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/storage/store/sql/sql.go b/storage/store/sql/sql.go
index cb0dd058b..ad1da4ad6 100644
--- a/storage/store/sql/sql.go
+++ b/storage/store/sql/sql.go
@@ -14,6 +14,7 @@ import (
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gocache/v2"
"github.com/TwiN/logr"
+ _ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "modernc.org/sqlite"
)
@@ -104,10 +105,16 @@ func NewStore(driver, path string, caching bool, maximumNumberOfResults, maximum
// createSchema creates the schema required to perform all database operations.
func (s *Store) createSchema() error {
- if s.driver == "sqlite" {
+ switch s.driver {
+ case "sqlite":
return s.createSQLiteSchema()
+ case "postgres":
+ return s.createPostgresSchema()
+ case "mysql":
+ return s.createMySQLSchema()
+ default:
+ return fmt.Errorf("unsupported database driver: %s", s.driver)
}
- return s.createPostgresSchema()
}
// GetAllEndpointStatuses returns all monitored endpoint.Status
@@ -385,10 +392,15 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
// Delete everything
result, err = s.db.Exec("DELETE FROM endpoints")
} else {
+ var query string
args := make([]interface{}, 0, len(keys))
- query := "DELETE FROM endpoints WHERE endpoint_key NOT IN ("
+ query = "DELETE FROM endpoints WHERE endpoint_key NOT IN ("
for i := range keys {
- query += fmt.Sprintf("$%d,", i+1)
+ if s.driver == "mysql" {
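+ // MySQL uses ? placeholders rather than the numbered $N placeholders understood by Postgres and SQLite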
+ query += "?,"
+ } else {
+ query += fmt.Sprintf("$%d,", i+1)
+ }
args = append(args, keys[i])
}
query = query[:len(query)-1] + ")" // Remove the last comma and add the closing parenthesis
@@ -412,8 +424,14 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
// GetTriggeredEndpointAlert returns whether the triggered alert for the specified endpoint as well as the necessary information to resolve it
func (s *Store) GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error) {
//logr.Debugf("[sql.GetTriggeredEndpointAlert] Getting triggered alert with checksum=%s for endpoint with key=%s", alert.Checksum(), ep.Key())
+ var query string
+ if s.driver == "mysql" {
+ query = `SELECT resolve_key, number_of_successes_in_a_row FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = ? LIMIT 1) AND configuration_checksum = ?`
+ } else {
+ query = `SELECT resolve_key, number_of_successes_in_a_row FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1) AND configuration_checksum = $2`
+ }
err = s.db.QueryRow(
- "SELECT resolve_key, number_of_successes_in_a_row FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1) AND configuration_checksum = $2",
+ query,
ep.Key(),
alert.Checksum(),
).Scan(&resolveKey, &numberOfSuccessesInARow)
@@ -450,14 +468,25 @@ func (s *Store) UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAle
return err
}
}
- _, err = tx.Exec(
- `
+ var query string
+ if s.driver == "mysql" {
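+ // ON DUPLICATE KEY UPDATE is MySQL's counterpart to the ON CONFLICT ... DO UPDATE upsert used for Postgres and SQLite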
+ query = `INSERT INTO endpoint_alerts_triggered
+ (endpoint_id, configuration_checksum, resolve_key, number_of_successes_in_a_row)
+ VALUES (?, ?, ?, ?)
+ ON DUPLICATE KEY UPDATE
+ resolve_key = VALUES(resolve_key),
+ number_of_successes_in_a_row = VALUES(number_of_successes_in_a_row)`
+ } else {
+ query = `
INSERT INTO endpoint_alerts_triggered (endpoint_id, configuration_checksum, resolve_key, number_of_successes_in_a_row)
VALUES ($1, $2, $3, $4)
ON CONFLICT(endpoint_id, configuration_checksum) DO UPDATE SET
resolve_key = $3,
number_of_successes_in_a_row = $4
- `,
+ `
+ }
+ _, err = tx.Exec(
+ query,
endpointID,
triggeredAlert.Checksum(),
triggeredAlert.ResolveKey,
@@ -477,7 +506,13 @@ func (s *Store) UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAle
// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
func (s *Store) DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
//logr.Debugf("[sql.DeleteTriggeredEndpointAlert] Deleting triggered alert with checksum=%s for endpoint with key=%s", triggeredAlert.Checksum(), ep.Key())
- _, err := s.db.Exec("DELETE FROM endpoint_alerts_triggered WHERE configuration_checksum = $1 AND endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $2 LIMIT 1)", triggeredAlert.Checksum(), ep.Key())
+ var query string
+ if s.driver == "mysql" {
+ query = `DELETE FROM endpoint_alerts_triggered WHERE configuration_checksum = ? AND endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = ? LIMIT 1)`
+ } else {
+ query = `DELETE FROM endpoint_alerts_triggered WHERE configuration_checksum = $1 AND endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $2 LIMIT 1)`
+ }
+ _, err := s.db.Exec(query, triggeredAlert.Checksum(), ep.Key())
return err
}
@@ -491,15 +526,32 @@ func (s *Store) DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.En
if len(checksums) == 0 {
// No checksums? Then it means there are no (enabled) alerts configured for that endpoint, so we can get rid of all
// persisted triggered alerts for that endpoint
- result, err = s.db.Exec("DELETE FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1)", ep.Key())
+ var query string
+ if s.driver == "mysql" {
+ query = `DELETE FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = ? LIMIT 1)`
+ } else {
+ query = `DELETE FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1)`
+ }
+ result, err = s.db.Exec(query, ep.Key())
} else {
args := make([]interface{}, 0, len(checksums)+1)
args = append(args, ep.Key())
- query := `DELETE FROM endpoint_alerts_triggered
+ var query string
+ if s.driver == "mysql" {
+ query = `DELETE FROM endpoint_alerts_triggered
+ WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = ? LIMIT 1)
+ AND configuration_checksum NOT IN (`
+ } else {
+ query = `DELETE FROM endpoint_alerts_triggered
WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1)
AND configuration_checksum NOT IN (`
+ }
for i := range checksums {
- query += fmt.Sprintf("$%d,", i+2)
+ if s.driver == "mysql" {
+ query += "?,"
+ } else {
+ query += fmt.Sprintf("$%d,", i+2)
+ }
args = append(args, checksums[i])
}
query = query[:len(query)-1] + ")" // Remove the last comma and add the closing parenthesis
@@ -540,41 +592,57 @@ func (s *Store) Close() {
func (s *Store) insertEndpoint(tx *sql.Tx, ep *endpoint.Endpoint) (int64, error) {
//logr.Debugf("[sql.insertEndpoint] Inserting endpoint with group=%s and name=%s", ep.Group, ep.Name)
var id int64
- err := tx.QueryRow(
- "INSERT INTO endpoints (endpoint_key, endpoint_name, endpoint_group) VALUES ($1, $2, $3) RETURNING endpoint_id",
- ep.Key(),
- ep.Name,
- ep.Group,
- ).Scan(&id)
- if err != nil {
- return 0, err
+ if s.driver == "mysql" {
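+ // MySQL does not support INSERT ... RETURNING, so the generated endpoint_id is retrieved with LastInsertId()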
+ result, err := tx.Exec(
+ "INSERT INTO endpoints (endpoint_key, endpoint_name, endpoint_group) VALUES (?, ?, ?)",
+ ep.Key(), ep.Name, ep.Group,
+ )
+ if err != nil {
+ return 0, err
+ }
+ id, err = result.LastInsertId()
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ err := tx.QueryRow(
+ "INSERT INTO endpoints (endpoint_key, endpoint_name, endpoint_group) VALUES ($1, $2, $3) RETURNING endpoint_id",
+ ep.Key(), ep.Name, ep.Group,
+ ).Scan(&id)
+ if err != nil {
+ return 0, err
+ }
}
return id, nil
}
// insertEndpointEvent inserts an event in the store
func (s *Store) insertEndpointEvent(tx *sql.Tx, endpointID int64, event *endpoint.Event) error {
- _, err := tx.Exec(
- "INSERT INTO endpoint_events (endpoint_id, event_type, event_timestamp) VALUES ($1, $2, $3)",
- endpointID,
- event.Type,
- event.Timestamp.UTC(),
- )
- if err != nil {
- return err
+ var err error
+ if s.driver == "mysql" {
+ _, err = tx.Exec(
+ "INSERT INTO endpoint_events (endpoint_id, event_type, event_timestamp) VALUES (?, ?, ?)",
+ endpointID,
+ event.Type,
+ event.Timestamp.UTC(),
+ )
+ } else {
+ _, err = tx.Exec(
+ "INSERT INTO endpoint_events (endpoint_id, event_type, event_timestamp) VALUES ($1, $2, $3)",
+ endpointID,
+ event.Type,
+ event.Timestamp.UTC(),
+ )
}
- return nil
+ return err
}
// insertEndpointResult inserts a result in the store
func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *endpoint.Result) error {
var endpointResultID int64
- err := tx.QueryRow(
- `
- INSERT INTO endpoint_results (endpoint_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
- RETURNING endpoint_result_id
- `,
+ var err error
+
+ queryParams := []any{
endpointID,
result.Success,
strings.Join(result.Errors, arraySeparator),
@@ -587,21 +655,49 @@ func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *endpo
result.IP,
result.Duration,
result.Timestamp.UTC(),
- ).Scan(&endpointResultID)
+ }
+
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ INSERT INTO endpoint_results (endpoint_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
+ res, execErr := tx.Exec(query, queryParams...)
+ if execErr != nil {
+ return execErr
+ }
+ endpointResultID, err = res.LastInsertId()
+ } else {
+ query = `
+ INSERT INTO endpoint_results (endpoint_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
+ RETURNING endpoint_result_id`
+ err = tx.QueryRow(query, queryParams...).Scan(&endpointResultID)
+ }
+
if err != nil {
return err
}
+
return s.insertConditionResults(tx, endpointResultID, result.ConditionResults)
}
func (s *Store) insertConditionResults(tx *sql.Tx, endpointResultID int64, conditionResults []*endpoint.ConditionResult) error {
var err error
for _, cr := range conditionResults {
- _, err = tx.Exec("INSERT INTO endpoint_result_conditions (endpoint_result_id, condition, success) VALUES ($1, $2, $3)",
- endpointResultID,
- cr.Condition,
- cr.Success,
- )
+ if s.driver == "mysql" {
+ _, err = tx.Exec("INSERT INTO endpoint_result_conditions (endpoint_result_id, `condition`, success) VALUES (?, ?, ?)",
+ endpointResultID,
+ cr.Condition,
+ cr.Success,
+ )
+ } else {
+ _, err = tx.Exec("INSERT INTO endpoint_result_conditions (endpoint_result_id, condition, success) VALUES ($1, $2, $3)",
+ endpointResultID,
+ cr.Condition,
+ cr.Success,
+ )
+ }
if err != nil {
return err
}
@@ -615,15 +711,28 @@ func (s *Store) updateEndpointUptime(tx *sql.Tx, endpointID int64, result *endpo
if result.Success {
successfulExecutions = 1
}
- _, err := tx.Exec(
- `
+
+ var query string
+ if s.driver == "mysql" {
+ query = `INSERT INTO endpoint_uptimes
+ (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
+ VALUES (?, ?, ?, ?, ?)
+ ON DUPLICATE KEY UPDATE
+ total_executions = total_executions + VALUES(total_executions),
+ successful_executions = successful_executions + VALUES(successful_executions),
+ total_response_time = total_response_time + VALUES(total_response_time)`
+ } else {
+ query = `
INSERT INTO endpoint_uptimes (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT(endpoint_id, hour_unix_timestamp) DO UPDATE SET
total_executions = excluded.total_executions + endpoint_uptimes.total_executions,
successful_executions = excluded.successful_executions + endpoint_uptimes.successful_executions,
total_response_time = excluded.total_response_time + endpoint_uptimes.total_response_time
- `,
+ `
+ }
+ _, err := tx.Exec(
+ query,
endpointID,
unixTimestampFlooredAtHour,
1,
@@ -678,15 +787,23 @@ func (s *Store) getEndpointStatusByKey(tx *sql.Tx, key string, parameters *pagin
}
func (s *Store) getEndpointIDGroupAndNameByKey(tx *sql.Tx, key string) (id int64, group, name string, err error) {
- err = tx.QueryRow(
- `
- SELECT endpoint_id, endpoint_group, endpoint_name
- FROM endpoints
- WHERE endpoint_key = $1
- LIMIT 1
- `,
- key,
- ).Scan(&id, &group, &name)
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT endpoint_id, endpoint_group, endpoint_name
+ FROM endpoints
+ WHERE endpoint_key = ?
+ LIMIT 1
+ `
+ } else {
+ query = `
+ SELECT endpoint_id, endpoint_group, endpoint_name
+ FROM endpoints
+ WHERE endpoint_key = $1
+ LIMIT 1
+ `
+ }
+ err = tx.QueryRow(query, key).Scan(&id, &group, &name)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return 0, "", "", common.ErrEndpointNotFound
@@ -697,14 +814,26 @@ func (s *Store) getEndpointIDGroupAndNameByKey(tx *sql.Tx, key string) (id int64
}
func (s *Store) getEndpointEventsByEndpointID(tx *sql.Tx, endpointID int64, page, pageSize int) (events []*endpoint.Event, err error) {
- rows, err := tx.Query(
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT event_type, event_timestamp
+ FROM endpoint_events
+ WHERE endpoint_id = ?
+ ORDER BY endpoint_event_id ASC
+ LIMIT ? OFFSET ?
`
+ } else {
+ query = `
SELECT event_type, event_timestamp
FROM endpoint_events
WHERE endpoint_id = $1
ORDER BY endpoint_event_id ASC
LIMIT $2 OFFSET $3
- `,
+ `
+ }
+ rows, err := tx.Query(
+ query,
endpointID,
pageSize,
(page-1)*pageSize,
@@ -721,14 +850,26 @@ func (s *Store) getEndpointEventsByEndpointID(tx *sql.Tx, endpointID int64, page
}
func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, page, pageSize int) (results []*endpoint.Result, err error) {
- rows, err := tx.Query(
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT endpoint_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp
+ FROM endpoint_results
+ WHERE endpoint_id = ?
+ ORDER BY endpoint_result_id DESC -- Normally, we'd sort by timestamp, but sorting by endpoint_result_id is faster
+ LIMIT ? OFFSET ?
`
+ } else {
+ query = `
SELECT endpoint_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp
FROM endpoint_results
WHERE endpoint_id = $1
ORDER BY endpoint_result_id DESC -- Normally, we'd sort by timestamp, but sorting by endpoint_result_id is faster
LIMIT $2 OFFSET $3
- `,
+ `
+ }
+ rows, err := tx.Query(
+ query,
endpointID,
pageSize,
(page-1)*pageSize,
@@ -759,12 +900,22 @@ func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, pag
}
// Get condition results
args := make([]interface{}, 0, len(idResultMap))
- query := `SELECT endpoint_result_id, condition, success
+ if s.driver == "mysql" {
+ query = `SELECT endpoint_result_id, ` + "`condition`" + `, success
FROM endpoint_result_conditions
WHERE endpoint_result_id IN (`
+ } else {
+ query = `SELECT endpoint_result_id, condition, success
+ FROM endpoint_result_conditions
+ WHERE endpoint_result_id IN (`
+ }
index := 1
for endpointResultID := range idResultMap {
- query += "$" + strconv.Itoa(index) + ","
+ if s.driver == "mysql" {
+ query += "?,"
+ } else {
+ query += "$" + strconv.Itoa(index) + ","
+ }
args = append(args, endpointResultID)
index++
}
@@ -786,14 +937,26 @@ func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, pag
}
func (s *Store) getEndpointUptime(tx *sql.Tx, endpointID int64, from, to time.Time) (uptime float64, avgResponseTime time.Duration, err error) {
- rows, err := tx.Query(
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT SUM(total_executions), SUM(successful_executions), SUM(total_response_time)
+ FROM endpoint_uptimes
+ WHERE endpoint_id = ?
+ AND hour_unix_timestamp >= ?
+ AND hour_unix_timestamp <= ?
`
+ } else {
+ query = `
SELECT SUM(total_executions), SUM(successful_executions), SUM(total_response_time)
FROM endpoint_uptimes
WHERE endpoint_id = $1
AND hour_unix_timestamp >= $2
AND hour_unix_timestamp <= $3
- `,
+ `
+ }
+ rows, err := tx.Query(
+ query,
endpointID,
from.Unix(),
to.Unix(),
@@ -813,15 +976,28 @@ func (s *Store) getEndpointUptime(tx *sql.Tx, endpointID int64, from, to time.Ti
}
func (s *Store) getEndpointAverageResponseTime(tx *sql.Tx, endpointID int64, from, to time.Time) (int, error) {
- rows, err := tx.Query(
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT SUM(total_executions), SUM(total_response_time)
+ FROM endpoint_uptimes
+ WHERE endpoint_id = ?
+ AND total_executions > 0
+ AND hour_unix_timestamp >= ?
+ AND hour_unix_timestamp <= ?
`
+ } else {
+ query = `
SELECT SUM(total_executions), SUM(total_response_time)
FROM endpoint_uptimes
WHERE endpoint_id = $1
AND total_executions > 0
AND hour_unix_timestamp >= $2
AND hour_unix_timestamp <= $3
- `,
+ `
+ }
+ rows, err := tx.Query(
+ query,
endpointID,
from.Unix(),
to.Unix(),
@@ -840,15 +1016,28 @@ func (s *Store) getEndpointAverageResponseTime(tx *sql.Tx, endpointID int64, fro
}
func (s *Store) getEndpointHourlyAverageResponseTimes(tx *sql.Tx, endpointID int64, from, to time.Time) (map[int64]int, error) {
- rows, err := tx.Query(
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT hour_unix_timestamp, total_executions, total_response_time
+ FROM endpoint_uptimes
+ WHERE endpoint_id = ?
+ AND total_executions > 0
+ AND hour_unix_timestamp >= ?
+ AND hour_unix_timestamp <= ?
`
+ } else {
+ query = `
SELECT hour_unix_timestamp, total_executions, total_response_time
FROM endpoint_uptimes
WHERE endpoint_id = $1
AND total_executions > 0
AND hour_unix_timestamp >= $2
AND hour_unix_timestamp <= $3
- `,
+ `
+ }
+ rows, err := tx.Query(
+ query,
endpointID,
from.Unix(),
to.Unix(),
@@ -868,7 +1057,13 @@ func (s *Store) getEndpointHourlyAverageResponseTimes(tx *sql.Tx, endpointID int
func (s *Store) getEndpointID(tx *sql.Tx, ep *endpoint.Endpoint) (int64, error) {
var id int64
- err := tx.QueryRow("SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1", ep.Key()).Scan(&id)
+ var query string
+ if s.driver == "mysql" {
+ query = "SELECT endpoint_id FROM endpoints WHERE endpoint_key = ?"
+ } else {
+ query = "SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1"
+ }
+ err := tx.QueryRow(query, ep.Key()).Scan(&id)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return 0, common.ErrEndpointNotFound
@@ -880,33 +1075,48 @@ func (s *Store) getEndpointID(tx *sql.Tx, ep *endpoint.Endpoint) (int64, error)
func (s *Store) getNumberOfEventsByEndpointID(tx *sql.Tx, endpointID int64) (int64, error) {
var numberOfEvents int64
- err := tx.QueryRow("SELECT COUNT(1) FROM endpoint_events WHERE endpoint_id = $1", endpointID).Scan(&numberOfEvents)
+ var query string
+ if s.driver == "mysql" {
+ query = "SELECT COUNT(1) FROM endpoint_events WHERE endpoint_id = ?"
+ } else {
+ query = "SELECT COUNT(1) FROM endpoint_events WHERE endpoint_id = $1"
+ }
+ err := tx.QueryRow(query, endpointID).Scan(&numberOfEvents)
return numberOfEvents, err
}
func (s *Store) getNumberOfResultsByEndpointID(tx *sql.Tx, endpointID int64) (int64, error) {
var numberOfResults int64
- err := tx.QueryRow("SELECT COUNT(1) FROM endpoint_results WHERE endpoint_id = $1", endpointID).Scan(&numberOfResults)
+ var query string
+ if s.driver == "mysql" {
+ query = "SELECT COUNT(1) FROM endpoint_results WHERE endpoint_id = ?"
+ } else {
+ query = "SELECT COUNT(1) FROM endpoint_results WHERE endpoint_id = $1"
+ }
+ err := tx.QueryRow(query, endpointID).Scan(&numberOfResults)
return numberOfResults, err
}
func (s *Store) getNumberOfUptimeEntriesByEndpointID(tx *sql.Tx, endpointID int64) (int64, error) {
var numberOfUptimeEntries int64
- err := tx.QueryRow("SELECT COUNT(1) FROM endpoint_uptimes WHERE endpoint_id = $1", endpointID).Scan(&numberOfUptimeEntries)
+ var query string
+ if s.driver == "mysql" {
+ query = "SELECT COUNT(1) FROM endpoint_uptimes WHERE endpoint_id = ?"
+ } else {
+ query = "SELECT COUNT(1) FROM endpoint_uptimes WHERE endpoint_id = $1"
+ }
+ err := tx.QueryRow(query, endpointID).Scan(&numberOfUptimeEntries)
return numberOfUptimeEntries, err
}
func (s *Store) getAgeOfOldestEndpointUptimeEntry(tx *sql.Tx, endpointID int64) (time.Duration, error) {
- rows, err := tx.Query(
- `
- SELECT hour_unix_timestamp
- FROM endpoint_uptimes
- WHERE endpoint_id = $1
- ORDER BY hour_unix_timestamp
- LIMIT 1
- `,
- endpointID,
- )
+ var query string
+ if s.driver == "mysql" {
+ query = "SELECT hour_unix_timestamp FROM endpoint_uptimes WHERE endpoint_id = ? ORDER BY hour_unix_timestamp LIMIT 1"
+ } else {
+ query = "SELECT hour_unix_timestamp FROM endpoint_uptimes WHERE endpoint_id = $1 ORDER BY hour_unix_timestamp LIMIT 1"
+ }
+ rows, err := tx.Query(query, endpointID)
if err != nil {
return 0, err
}
@@ -924,7 +1134,13 @@ func (s *Store) getAgeOfOldestEndpointUptimeEntry(tx *sql.Tx, endpointID int64)
func (s *Store) getLastEndpointResultSuccessValue(tx *sql.Tx, endpointID int64) (bool, error) {
var success bool
- err := tx.QueryRow("SELECT success FROM endpoint_results WHERE endpoint_id = $1 ORDER BY endpoint_result_id DESC LIMIT 1", endpointID).Scan(&success)
+ var query string
+ if s.driver == "mysql" {
+ query = "SELECT success FROM endpoint_results WHERE endpoint_id = ? ORDER BY endpoint_result_id DESC LIMIT 1"
+ } else {
+ query = "SELECT success FROM endpoint_results WHERE endpoint_id = $1 ORDER BY endpoint_result_id DESC LIMIT 1"
+ }
+ err := tx.QueryRow(query, endpointID).Scan(&success)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return false, errNoRowsReturned
@@ -936,8 +1152,28 @@ func (s *Store) getLastEndpointResultSuccessValue(tx *sql.Tx, endpointID int64)
// deleteOldEndpointEvents deletes endpoint events that are no longer needed
func (s *Store) deleteOldEndpointEvents(tx *sql.Tx, endpointID int64) error {
- _, err := tx.Exec(
- `
+ var err error
+ if s.driver == "mysql" {
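+ // MySQL does not allow the DELETE target table to be referenced in a subquery of the same statement, so an anti-join on a derived table is used instead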
+ _, err = tx.Exec(
+ `
+ DELETE e1 FROM endpoint_events e1
+ LEFT JOIN (
+ SELECT endpoint_event_id
+ FROM endpoint_events
+ WHERE endpoint_id = ?
+ ORDER BY endpoint_event_id DESC
+ LIMIT ?
+ ) e2 ON e1.endpoint_event_id = e2.endpoint_event_id
+ WHERE e1.endpoint_id = ?
+ AND e2.endpoint_event_id IS NULL
+ `,
+ endpointID,
+ s.maximumNumberOfEvents,
+ endpointID,
+ )
+ } else {
+ _, err = tx.Exec(
+ `
DELETE FROM endpoint_events
WHERE endpoint_id = $1
AND endpoint_event_id NOT IN (
@@ -948,16 +1184,37 @@ func (s *Store) deleteOldEndpointEvents(tx *sql.Tx, endpointID int64) error {
LIMIT $2
)
`,
- endpointID,
- s.maximumNumberOfEvents,
- )
+ endpointID,
+ s.maximumNumberOfEvents,
+ )
+ }
return err
}
// deleteOldEndpointResults deletes endpoint results that are no longer needed
func (s *Store) deleteOldEndpointResults(tx *sql.Tx, endpointID int64) error {
- _, err := tx.Exec(
- `
+ var err error
+ if s.driver == "mysql" {
+ _, err = tx.Exec(
+ `
+ DELETE e1 FROM endpoint_results e1
+ LEFT JOIN (
+ SELECT endpoint_result_id
+ FROM endpoint_results
+ WHERE endpoint_id = ?
+ ORDER BY endpoint_result_id DESC
+ LIMIT ?
+ ) e2 ON e1.endpoint_result_id = e2.endpoint_result_id
+ WHERE e1.endpoint_id = ?
+ AND e2.endpoint_result_id IS NULL;
+ `,
+ endpointID,
+ s.maximumNumberOfResults,
+ endpointID,
+ )
+ } else {
+ _, err = tx.Exec(
+ `
DELETE FROM endpoint_results
WHERE endpoint_id = $1
AND endpoint_result_id NOT IN (
@@ -968,14 +1225,21 @@ func (s *Store) deleteOldEndpointResults(tx *sql.Tx, endpointID int64) error {
LIMIT $2
)
`,
- endpointID,
- s.maximumNumberOfResults,
- )
+ endpointID,
+ s.maximumNumberOfResults,
+ )
+ }
return err
}
func (s *Store) deleteOldUptimeEntries(tx *sql.Tx, endpointID int64, maxAge time.Time) error {
- _, err := tx.Exec("DELETE FROM endpoint_uptimes WHERE endpoint_id = $1 AND hour_unix_timestamp < $2", endpointID, maxAge.Unix())
+ var query string
+ if s.driver == "mysql" {
+ query = "DELETE FROM endpoint_uptimes WHERE endpoint_id = ? AND hour_unix_timestamp < ?"
+ } else {
+ query = "DELETE FROM endpoint_uptimes WHERE endpoint_id = $1 AND hour_unix_timestamp < $2"
+ }
+ _, err := tx.Exec(query, endpointID, maxAge.Unix())
return err
}
@@ -998,14 +1262,26 @@ func (s *Store) mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEn
minThreshold = time.Date(minThreshold.Year(), minThreshold.Month(), minThreshold.Day(), 0, 0, 0, 0, minThreshold.Location())
maxThreshold := now.Add(-uptimeRetention)
// Get all uptime entries older than uptimeHourlyMergeThreshold
- rows, err := tx.Query(
+ var query string
+ if s.driver == "mysql" {
+ query = `
+ SELECT hour_unix_timestamp, total_executions, successful_executions, total_response_time
+ FROM endpoint_uptimes
+ WHERE endpoint_id = ?
+ AND hour_unix_timestamp < ?
+ AND hour_unix_timestamp >= ?
`
+ } else {
+ query = `
SELECT hour_unix_timestamp, total_executions, successful_executions, total_response_time
FROM endpoint_uptimes
WHERE endpoint_id = $1
AND hour_unix_timestamp < $2
AND hour_unix_timestamp >= $3
- `,
+ `
+ }
+ rows, err := tx.Query(
+ query,
endpointID,
minThreshold.Unix(),
maxThreshold.Unix(),
@@ -1036,21 +1312,38 @@ func (s *Store) mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEn
}
}
// Delete older hourly uptime entries
- _, err = tx.Exec("DELETE FROM endpoint_uptimes WHERE endpoint_id = $1 AND hour_unix_timestamp < $2", endpointID, minThreshold.Unix())
+ if s.driver == "mysql" {
+ query = "DELETE FROM endpoint_uptimes WHERE endpoint_id = ? AND hour_unix_timestamp < ?"
+ } else {
+ query = "DELETE FROM endpoint_uptimes WHERE endpoint_id = $1 AND hour_unix_timestamp < $2"
+ }
+ _, err = tx.Exec(query, endpointID, minThreshold.Unix())
if err != nil {
return err
}
// Insert new daily uptime entries
for unixTimestamp, entry := range dailyEntries {
- _, err = tx.Exec(
+ if s.driver == "mysql" {
+ query = `
+ INSERT INTO endpoint_uptimes (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
+ VALUES (?, ?, ?, ?, ?)
+ ON DUPLICATE KEY UPDATE
+ total_executions = VALUES(total_executions),
+ successful_executions = VALUES(successful_executions),
+ total_response_time = VALUES(total_response_time)
`
- INSERT INTO endpoint_uptimes (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
- VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT(endpoint_id, hour_unix_timestamp) DO UPDATE SET
- total_executions = $3,
- successful_executions = $4,
- total_response_time = $5
- `,
+ } else {
+ query = `
+ INSERT INTO endpoint_uptimes (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
+ VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT(endpoint_id, hour_unix_timestamp) DO UPDATE SET
+ total_executions = $3,
+ successful_executions = $4,
+ total_response_time = $5
+ `
+ }
+ _, err = tx.Exec(
+ query,
endpointID,
unixTimestamp,
entry.totalExecutions,
diff --git a/storage/store/store.go b/storage/store/store.go
index f7b581ec4..5d9ba7335 100644
--- a/storage/store/store.go
+++ b/storage/store/store.go
@@ -116,12 +116,12 @@ func Initialize(cfg *storage.Config) error {
MaximumNumberOfEvents: storage.DefaultMaximumNumberOfEvents,
}
}
- if len(cfg.Path) == 0 && cfg.Type != storage.TypePostgres {
+ if len(cfg.Path) == 0 && (cfg.Type != storage.TypePostgres && cfg.Type != storage.TypeMySQL) {
logr.Infof("[store.Initialize] Creating storage provider of type=%s", cfg.Type)
}
ctx, cancelFunc = context.WithCancel(context.Background())
switch cfg.Type {
- case storage.TypeSQLite, storage.TypePostgres:
+ case storage.TypeSQLite, storage.TypePostgres, storage.TypeMySQL:
store, err = sql.NewStore(string(cfg.Type), cfg.Path, cfg.Caching, cfg.MaximumNumberOfResults, cfg.MaximumNumberOfEvents)
if err != nil {
return err
diff --git a/storage/type.go b/storage/type.go
index 4c25350c6..4a36bfbe9 100644
--- a/storage/type.go
+++ b/storage/type.go
@@ -7,4 +7,5 @@ const (
TypeMemory Type = "memory" // In-memory store
TypeSQLite Type = "sqlite" // SQLite store
TypePostgres Type = "postgres" // Postgres store
+ TypeMySQL Type = "mysql" // MySQL store
)