diff --git a/README.md b/README.md index e7a7318..ebe32c0 100644 --- a/README.md +++ b/README.md @@ -7,22 +7,33 @@ Table of Contents - [DB Yaml](#db-yaml) - [Features](#features) - [Usage](#usage) - * [Write to DB](#write-to-db) - * [Query DB](#query-db) - + [Get First Key](#get-first-key) - + [Search for Keys](#search-for-keys) - * [Query Path](#query-path) - + [Query Path with Arrays](#query-path-with-arrays) - - [Without trailing array](#without-trailing-array) - - [With trailing array](#with-trailing-array) - * [Delete Key By Path](#delete-key-by-path) - * [Convert Utils](#convert-utils) + * [Write to DB](#write-to-db) + * [Query DB](#query-db) + + [Get First Key](#get-first-key) + + [Search for Keys](#search-for-keys) + * [Query Path](#query-path) + + [Query Path with Arrays](#query-path-with-arrays) + - [Without trailing array](#without-trailing-array) + - [With trailing array](#with-trailing-array) + * [Delete Key By Path](#delete-key-by-path) + * [Convert Utils](#convert-utils) + [Get map of strings from interface](#get-map-of-strings-from-interface) - [Get map directly from a GetPath object](#get-map-directly-from-a-getpath-object) - - [Get map manually](#get-map-manually) - + [Get array of string from interface](#get-array-of-string-from-interface) + - [Get map manually](#get-map-manually) + + [Get array of string from interface](#get-array-of-string-from-interface) - [Get array directly from a GetPath object](#get-array-directly-from-a-getpath-object) - - [Get array manually](#get-array-manually) + - [Get array manually](#get-array-manually) + * [Document Management](#document-management) + + [Add a new doc](#add-a-new-doc) + + [Switch Doc](#switch-doc) + + [Document names](#document-names) + - [Name documents manually](#name-documents-manually) + - [Name all documents automatically](#name-all-documents-automatically) + - [Switch between docs by name](#switch-between-docs-by-name) + + [Import Docs](#import-docs) + + [Global Upsert](#global-upsert) + + 
[Global Update](#global-update) + ## Features @@ -111,7 +122,7 @@ Get all they keys (if any). This returns the full path for the key, not the key values. To get the values check the next section **GetPath** ```go -keys, err := state.Get("key-1") +keys, err := state.FindKeys("key-1") if err != nil { logger.Fatalf(err.Error()) } @@ -181,7 +192,7 @@ key-1: To get the first index of `key-2`, issue -``` +```go keyPath, err := state.GetPath("key-1.key-2.[0]") if err != nil { logger.Fatalf(err.Error()) } @@ -331,3 +342,185 @@ vArray := assertData.GetArray() logger.Info(vArray) ``` + +### Document Management + +DBy creates by default an array of documents called library. That is in fact an array of interfaces + +When initiating DBy, document 0 (index 0) is created by default and any action is done to that document, unless we switch to a new one + +#### Add a new doc + +To add a new doc, issue + +```go +err = state.AddDoc() +if err != nil { + logger.Fatal(err) +} + +``` + +**Note: Adding a new doc also switches the pointer to that doc. 
Any action will write/read from the new doc by default** + +#### Switch Doc + +To switch a different document, we can use **Switch** method that takes as an argument an index + +For example to switch to doc 1 (second doc), issue + +```go +err = state.Switch(1) +if err != nil { + logger.Fatal(err) +} +``` + +#### Document names + +When we work with more than 1 document, we may want to set names in order to easily switch between docs + +We have 2 ways to name our documents + +- Add a name to each document manually +- Add a name providing a path that exists in all documents + +##### Name documents manually + +To name a document manually, we can use the **SetName** method which takes 2 arguments + +- name +- doc index + +For example to name document with index 0, as myDoc + +```go +err := state.SetName("myDoc", 0) +if err != nil { + logger.Fatal(err) +} +``` + +##### Name all documents automatically + +To name all documents automatically we need to ensure that the same path exists in all documents. + +The method for updating all documents is called **SetNames** and takes 2 arguments + +- Prefix: A path in the documents that will be used for the first name +- Suffix: A path in the documents that will be used for the last name + +**Note: Docs that do not have the paths that are queried will not get a name** + +This method best works with **Kubernetes** manifests, where all docs have a common set of fields. + +For example + +```yaml +apiVersion: someApi-0 +kind: someKind-0 +metadata: +... + name: someName-0 +... +--- +apiVersion: someApi-1 +kind: someKind-1 +metadata: +... + name: someName-1 +... +--- +``` + +From above we could give a name for all our documents if we use **kind** + **metadata.name** for the name. 
+ +```go +err := state.SetNames("kind", "metadata.name") +if err != nil { + logger.Fatal(err) +} +``` + +###### List all doc names + +To get the name of all named docs, issue + +```go +for i, j := range state.ListDocs() { + fmt.Println(i, j) +} +``` +Example output based on the previous **SetNames** example + +```bash +0 service/listener-svc +1 poddisruptionbudget/listener-svc +2 horizontalpodautoscaler/caller-svc +3 deployment/caller-svc +4 service/caller-svc +5 poddisruptionbudget/caller-svc +6 horizontalpodautoscaler/listener-svc +7 deployment/listener-svc +``` + +##### Switch between docs by name + +To switch to a doc by using the doc's name, issue + +```go +err = state.SwitchDoc("PodDisruptionBudget/caller-svc") +if err != nil { + logger.Fatal(err) +} +``` + +#### Import Docs + +We can import a set of docs with **ImportDocs** method. For example if we have the following yaml + +```yaml +apiVersion: someApi-0 +kind: someKind-0 +metadata: +... + name: someName-0 +... +--- +apiVersion: someApi-1 +kind: someKind-1 +metadata: +... + name: someName-1 +... +--- +``` + +We can import it by giving the path of the file + +```go +err = state.ImportDocs("file-name.yaml") +if err != nil { + logger.Fatal(err) +} +``` + +#### Global Upsert + +We can use upsert to update or create keys on all documents + +```go +err = state.UpsertGlobal( + "some.path", + "v0.3.0", +) +if err != nil { + logger.Fatal(err) +} + +``` + +#### Global Update + +Global update works as **GlobalUpsert** but it skips documents that +miss a path rather than creating the path on those docs. 
\ No newline at end of file diff --git a/db/convert.go b/db/convert.go index cae5ee2..e3156f6 100644 --- a/db/convert.go +++ b/db/convert.go @@ -26,108 +26,108 @@ func NewConvertFactory() *AssertData { } // Clear for resetting AssertData -func (s *AssertData) Clear() { - s.Cache.Clear() - s.D0 = make(map[string]string) - s.A0 = make([]string, 0) +func (a *AssertData) Clear() { + a.Cache.Clear() + a.D0 = make(map[string]string) + a.A0 = make([]string, 0) } // Input sets a data source that can be used for assertion -func (s *AssertData) Input(o interface{}) *AssertData { - s.Clear() - s.Cache.V1 = o - return s +func (a *AssertData) Input(o interface{}) *AssertData { + a.Clear() + a.Cache.V1 = o + return a } -func (s *AssertData) toBytes() { - s.Cache.B, s.Cache.E = yaml.Marshal(s.Cache.V1) - if s.Cache.E != nil { - s.Error = s.Cache.E +func (a *AssertData) toBytes() { + a.Cache.B, a.Cache.E = yaml.Marshal(a.Cache.V1) + if a.Cache.E != nil { + a.Error = a.Cache.E } } // GetMap for converting a map[interface{}]interface{} into a map[string]string -func (s *AssertData) GetMap() map[string]string { - if s.Cache.E != nil { - s.Error = s.Cache.E +func (a *AssertData) GetMap() map[string]string { + if a.Cache.E != nil { + a.Error = a.Cache.E return nil } - s.toBytes() - if s.Cache.E != nil { + a.toBytes() + if a.Cache.E != nil { return nil } - s.Cache.E = yaml.Unmarshal(s.Cache.B, &s.D0) - if s.Cache.E != nil { - s.Error = s.Cache.E + a.Cache.E = yaml.Unmarshal(a.Cache.B, &a.D0) + if a.Cache.E != nil { + a.Error = a.Cache.E return nil } - return s.D0 + return a.D0 } // GetArray for converting a []interface{} to []string -func (s *AssertData) GetArray() []string { - if s.Cache.E != nil { - s.Error = s.Cache.E +func (a *AssertData) GetArray() []string { + if a.Cache.E != nil { + a.Error = a.Cache.E return nil } - _, isArray := s.Cache.V1.([]interface{}) + _, isArray := a.Cache.V1.([]interface{}) if !isArray { - s.Cache.E = wrapErr(fmt.Errorf(notArrayObj), getFn()) - s.Error = 
s.Cache.E + a.Cache.E = wrapErr(fmt.Errorf(notArrayObj), getFn()) + a.Error = a.Cache.E return nil } - s.toBytes() - if s.Cache.E != nil { + a.toBytes() + if a.Cache.E != nil { return nil } - s.Cache.E = yaml.Unmarshal(s.Cache.B, &s.A0) - if s.Cache.E != nil { - s.Error = s.Cache.E + a.Cache.E = yaml.Unmarshal(a.Cache.B, &a.A0) + if a.Cache.E != nil { + a.Error = a.Cache.E return nil } - return s.A0 + return a.A0 } // Key copies initial interface object and returns a map of interfaces{} // Used to easily pipe interfaces -func (s *AssertData) Key(k string) *AssertData { - if s.Cache.E != nil { - s.Error = s.Cache.E - return s +func (a *AssertData) Key(k string) *AssertData { + if a.Cache.E != nil { + a.Error = a.Cache.E + return a } - _, isMap := s.Cache.V1.(map[interface{}]interface{}) + _, isMap := a.Cache.V1.(map[interface{}]interface{}) if !isMap { - s.Cache.E = wrapErr(fmt.Errorf(notAMap), getFn()) - s.Error = s.Cache.E - return s + a.Cache.E = wrapErr(fmt.Errorf(notAMap), getFn()) + a.Error = a.Cache.E + return a } - s.Cache.V1 = s.Cache.V1.(map[interface{}]interface{})[k] + a.Cache.V1 = a.Cache.V1.(map[interface{}]interface{})[k] - return s + return a } // Index getting an interface{} from a []interface{} -func (s *AssertData) Index(i int) *AssertData { - if s.Cache.E != nil { - s.Error = s.Cache.E - return s +func (a *AssertData) Index(i int) *AssertData { + if a.Cache.E != nil { + a.Error = a.Cache.E + return a } - _, isArray := s.Cache.V1.([]interface{}) + _, isArray := a.Cache.V1.([]interface{}) if !isArray { - s.Cache.E = wrapErr(fmt.Errorf(notArrayObj), getFn()) - s.Error = s.Cache.E - return s + a.Cache.E = wrapErr(fmt.Errorf(notArrayObj), getFn()) + a.Error = a.Cache.E + return a } - s.Cache.V1 = s.Cache.V1.([]interface{})[i] + a.Cache.V1 = a.Cache.V1.([]interface{})[i] - return s + return a } diff --git a/db/errors.go b/db/errors.go index d4c4984..29e2021 100644 --- a/db/errors.go +++ b/db/errors.go @@ -17,6 +17,9 @@ const ( arrayOutOfRange = "index 
value (%s) is bigger than the length (%s) of the array to be indexed" invalidKeyPath = "the key||path [%s] that was given is not valid" emptyKey = "path [%s] contains an empty key" + libOutOfIndex = "lib out of index" + docNotExists = "doc [%s] does not exist in lib" + fieldNotString = "[%s] with value [%s] is not a string" ) func wrapErr(e error, s string) error { diff --git a/db/sql.go b/db/sql.go index 602f105..4db5caf 100644 --- a/db/sql.go +++ b/db/sql.go @@ -40,39 +40,39 @@ func NewSQLFactory() *SQL { } // Clear deletes all objects from Query and Cache structures -func (d *SQL) Clear() *SQL { - d.Query.Clear() - d.Cache.Clear() +func (s *SQL) Clear() *SQL { + s.Query.Clear() + s.Cache.Clear() - return d + return s } -func (d *SQL) getObj(k string, o interface{}) (interface{}, bool) { +func (s *SQL) getObj(k string, o interface{}) (interface{}, bool) { // The object is either a map or an array. // If isMap returns false then check the array obj, isMap := o.(map[interface{}]interface{}) if !isMap { - return d.getArrayObject(k, o) + return s.getArrayObject(k, o) } for thisKey, thisObj := range obj { - d.Cache.Keys = append(d.Cache.Keys, thisKey.(string)) + s.Cache.Keys = append(s.Cache.Keys, thisKey.(string)) if thisKey == k { return thisObj, true } // Call self again - if objFinal, found := d.getObj(k, thisObj); found { + if objFinal, found := s.getObj(k, thisObj); found { return objFinal, found } - d.Cache.dropLastKey() + s.Cache.dropLastKey() } return nil, false } -func (d *SQL) getArrayObject(k string, o interface{}) (interface{}, bool) { +func (s *SQL) getArrayObject(k string, o interface{}) (interface{}, bool) { // This is always called after object has been // checked if it is a map. 
If isArray is false then // the object is neither and we should return false @@ -86,19 +86,19 @@ func (d *SQL) getArrayObject(k string, o interface{}) (interface{}, bool) { } for i, thisArrayObj := range arrayObj { - d.Cache.Keys = append(d.Cache.Keys, "["+strconv.Itoa(i)+"]") - arrayObjFinal, found := d.getObj(k, thisArrayObj) + s.Cache.Keys = append(s.Cache.Keys, "["+strconv.Itoa(i)+"]") + arrayObjFinal, found := s.getObj(k, thisArrayObj) if found { return arrayObjFinal, found } - d.Cache.dropLastKey() + s.Cache.dropLastKey() } return nil, false } -func (d *SQL) getIndex(k string) (int, error) { +func (s *SQL) getIndex(k string) (int, error) { if !strings.HasPrefix(k, "[") || !strings.HasSuffix(k, "]") { return 0, wrapErr(fmt.Errorf(notAnIndex, k), getFn()) } @@ -110,12 +110,12 @@ func (d *SQL) getIndex(k string) (int, error) { return intVar, nil } -func (d *SQL) getFromIndex(k []string, o interface{}) (interface{}, error) { +func (s *SQL) getFromIndex(k []string, o interface{}) (interface{}, error) { if getObjectType(o) != arrayObj { return nil, wrapErr(errors.New(notArrayObj), getFn()) } - i, err := d.getIndex(k[0]) + i, err := s.getIndex(k[0]) if err != nil { return nil, wrapErr(err, getFn()) } @@ -132,20 +132,20 @@ func (d *SQL) getFromIndex(k []string, o interface{}) (interface{}, error) { } if len(k) > 1 { - return d.getPath(k[1:], o.([]interface{})[i]) + return s.getPath(k[1:], o.([]interface{})[i]) } return o.([]interface{})[i], nil } -func (d *SQL) getPath(k []string, o interface{}) (interface{}, error) { +func (s *SQL) getPath(k []string, o interface{}) (interface{}, error) { if err := checkKeyPath(k); err != nil { return nil, wrapErr(err, getFn()) } obj, err := interfaceToMap(o) if err != nil { - return d.getFromIndex(k, o) + return s.getFromIndex(k, o) } if len(k) == 0 { @@ -156,12 +156,12 @@ func (d *SQL) getPath(k []string, o interface{}) (interface{}, error) { if thisKey != k[0] { continue } - d.Cache.Keys = append(d.Cache.Keys, k[0]) + 
s.Cache.Keys = append(s.Cache.Keys, k[0]) if len(k) == 1 { return thisObj, nil } - objFinal, err := d.getPath(k[1:], thisObj) + objFinal, err := s.getPath(k[1:], thisObj) if err != nil { return nil, wrapErr(err, getFn()) } @@ -171,7 +171,7 @@ func (d *SQL) getPath(k []string, o interface{}) (interface{}, error) { return nil, wrapErr(fmt.Errorf(keyDoesNotExist, k[0]), getFn()) } -func (d *SQL) deleteArrayItem(k string, o interface{}) bool { +func (s *SQL) deleteArrayItem(k string, o interface{}) bool { if o == nil { return false } @@ -184,10 +184,10 @@ func (d *SQL) deleteArrayItem(k string, o interface{}) bool { return false } -func (d *SQL) deleteItem(k string, o interface{}) bool { +func (s *SQL) deleteItem(k string, o interface{}) bool { _, ok := o.(map[interface{}]interface{}) if !ok { - return d.deleteArrayItem(k, o) + return s.deleteArrayItem(k, o) } for kn := range o.(map[interface{}]interface{}) { @@ -199,7 +199,7 @@ func (d *SQL) deleteItem(k string, o interface{}) bool { return false } -func (d *SQL) delPath(k string, o interface{}) error { +func (s *SQL) delPath(k string, o interface{}) error { keys := strings.Split(k, ".") if err := checkKeyPath(keys); err != nil { return wrapErr(err, getFn()) @@ -210,57 +210,57 @@ func (d *SQL) delPath(k string, o interface{}) error { } if len(keys) == 1 { - if !d.deleteItem(keys[0], o) { + if !s.deleteItem(keys[0], o) { return wrapErr(fmt.Errorf(keyDoesNotExist, k), getFn()) } return nil } - d.Cache.dropKeys() - obj, err := d.getPath(keys[:len(keys)-1], o) + s.Cache.dropKeys() + obj, err := s.getPath(keys[:len(keys)-1], o) if err != nil { return wrapErr(err, getFn()) } - d.Cache.dropKeys() - if !d.deleteItem(keys[len(keys)-1], obj) { + s.Cache.dropKeys() + if !s.deleteItem(keys[len(keys)-1], obj) { return wrapErr(fmt.Errorf(keyDoesNotExist, k), getFn()) } return nil } -func (d *SQL) get(k string, o interface{}) ([]string, error) { +func (s *SQL) get(k string, o interface{}) ([]string, error) { var err error var key 
string - d.Clear() - d.Cache.V1, err = copyMap(o) + s.Clear() + s.Cache.V1, err = copyMap(o) if err != nil { return nil, wrapErr(err, getFn()) } for { - if _, found := d.getObj(k, d.Cache.V1); !found { + if _, found := s.getObj(k, s.Cache.V1); !found { break } - key = strings.Join(d.Cache.Keys, ".") - d.Query.KeysFound = append(d.Query.KeysFound, key) + key = strings.Join(s.Cache.Keys, ".") + s.Query.KeysFound = append(s.Query.KeysFound, key) - if err := d.delPath(key, d.Cache.V1); err != nil { - return d.Query.KeysFound, wrapErr(err, getFn()) + if err := s.delPath(key, s.Cache.V1); err != nil { + return s.Query.KeysFound, wrapErr(err, getFn()) } - d.Cache.dropKeys() + s.Cache.dropKeys() } - return d.Query.KeysFound, nil + return s.Query.KeysFound, nil } -func (d *SQL) getFirst(k string, o interface{}) (interface{}, error) { - d.Clear() +func (s *SQL) getFirst(k string, o interface{}) (interface{}, error) { + s.Clear() - keys, err := d.get(k, o) + keys, err := s.get(k, o) if err != nil { return nil, wrapErr(err, getFn()) } @@ -274,25 +274,25 @@ func (d *SQL) getFirst(k string, o interface{}) (interface{}, error) { return nil, wrapErr(err, getFn()) } - d.Cache.C1 = len(keySlice) + s.Cache.C1 = len(keySlice) if len(keys) == 1 { - path, err := d.getPath(keySlice, o) + path, err := s.getPath(keySlice, o) return path, wrapErr(err, getFn()) } for i, key := range keys[1:] { - if len(strings.Split(key, ".")) < d.Cache.C1 { - d.Cache.C1 = len(strings.Split(key, ".")) - d.Cache.C2 = i + 1 + if len(strings.Split(key, ".")) < s.Cache.C1 { + s.Cache.C1 = len(strings.Split(key, ".")) + s.Cache.C2 = i + 1 } } - path, err := d.getPath(strings.Split(keys[d.Cache.C2], "."), o) + path, err := s.getPath(strings.Split(keys[s.Cache.C2], "."), o) return path, wrapErr(err, getFn()) } -func (d *SQL) upsertRecursive(k []string, o, v interface{}) error { - d.Clear() +func (s *SQL) upsertRecursive(k []string, o, v interface{}) error { + s.Clear() if err := checkKeyPath(k); err != nil { return 
wrapErr(err, getFn()) @@ -309,7 +309,7 @@ func (d *SQL) upsertRecursive(k []string, o, v interface{}) error { } if len(k) > 1 { - return wrapErr(d.upsertRecursive(k[1:], thisObj, v), getFn()) + return wrapErr(s.upsertRecursive(k[1:], thisObj, v), getFn()) } switch getObjectType(thisObj) { @@ -325,7 +325,7 @@ func (d *SQL) upsertRecursive(k []string, o, v interface{}) error { obj[k[0]] = make(map[interface{}]interface{}) if len(k) > 1 { - return wrapErr(d.upsertRecursive(k[1:], obj[k[0]], v), getFn()) + return wrapErr(s.upsertRecursive(k[1:], obj[k[0]], v), getFn()) } obj[k[0]] = v @@ -333,7 +333,7 @@ func (d *SQL) upsertRecursive(k []string, o, v interface{}) error { return nil } -func (d *SQL) mergeDBs(path string, o interface{}) error { +func (s *SQL) mergeDBs(path string, o interface{}) error { var dataNew interface{} ok, err := fileExists(path) @@ -358,7 +358,7 @@ func (d *SQL) mergeDBs(path string, o interface{}) error { } for kn, vn := range obj { - err = d.upsertRecursive(strings.Split(kn.(string), "."), o, vn) + err = s.upsertRecursive(strings.Split(kn.(string), "."), o, vn) if err != nil { return wrapErr(err, getFn()) } diff --git a/db/storage.go b/db/storage.go index 821ee33..3d39cb0 100644 --- a/db/storage.go +++ b/db/storage.go @@ -1,10 +1,13 @@ package db import ( + "bytes" + "fmt" "io/ioutil" "os" "path" "path/filepath" + "strings" "sync" "gopkg.in/yaml.v2" @@ -15,7 +18,9 @@ import ( type Storage struct { sync.Mutex SQL *SQL - Data interface{} + Data []interface{} + Lib map[string]int + AD int Path string } @@ -24,6 +29,8 @@ func NewStorageFactory(path string) (*Storage, error) { state := &Storage{ SQL: NewSQLFactory(), Path: path, + Data: make([]interface{}, 0), + Lib: make(map[string]int), } stateDir := filepath.Dir(path) @@ -38,7 +45,8 @@ func NewStorageFactory(path string) (*Storage, error) { } if !stateExists { - state.Data = map[string]string{} + state.Data = append(state.Data, map[string]string{}) + state.AD = 0 state.Write() } @@ -53,36 
+61,192 @@ func NewStorageFactory(path string) (*Storage, error) { return state, nil } -// Read for reading the local yaml file and importing it -// in memory -func (i *Storage) Read() error { - f, err := ioutil.ReadFile(i.Path) +// SetNames can set names automatically to the documents +// that have the queried paths. +// input(f) is the first path that will be quieried +// input(l) is the last path +// +// If a document has both paths, a name will be generated +// and will be mapped with the document's index +func (s *Storage) SetNames(f, l string) error { + for i := range s.Data { + s.AD = i + kind, err := s.GetPath(strings.ToLower(f)) + if err != nil { + continue + } + name, err := s.GetPath(strings.ToLower(l)) + if err != nil { + continue + } + + sKind, ok := kind.(string) + if !ok { + wrapErr(fmt.Errorf(fieldNotString, strings.ToLower(f), kind), getFn()) + } + + sName, ok := name.(string) + if !ok { + wrapErr(fmt.Errorf(fieldNotString, strings.ToLower(l), name), getFn()) + } + + docName := fmt.Sprintf("%s/%s", strings.ToLower(sKind), strings.ToLower(sName)) + s.Lib[docName] = i + } + + return nil +} + +// SetName adds a name for a document and maps with it the given doc index +func (s *Storage) SetName(n string, i int) error { + err := s.Switch(i) if err != nil { return wrapErr(err, getFn()) } + s.Lib[strings.ToLower(n)] = i - i.Lock() - defer i.Unlock() + return nil +} - return yaml.Unmarshal(f, &i.Data) +// Switch will change Active Document (AD) to the given index +func (s *Storage) Switch(i int) error { + if i > len(s.Data)-1 { + return wrapErr(fmt.Errorf(libOutOfIndex), getFn()) + } + s.AD = i + return nil } -// Write for writing memory content to the local yaml file -func (i *Storage) Write() error { - i.Lock() - defer i.Unlock() - data, err := yaml.Marshal(&i.Data) +// AddDoc will add a new document to the stack and will switch +// Active Document index to that document +func (s *Storage) AddDoc() error { + s.AD++ + s.Data = append(s.Data, 
make(map[interface{}]interface{})) + return s.stateReload() +} + +// ListDocs will return an array with all docs names +func (s *Storage) ListDocs() []string { + var docs []string + for i := range s.Lib { + docs = append(docs, i) + } + return docs +} + +// SwitchDoc for switching to a document using the documents name (if any) +func (s *Storage) SwitchDoc(n string) error { + i, exists := s.Lib[strings.ToLower(n)] + if !exists { + return wrapErr(fmt.Errorf(docNotExists, strings.ToLower(n)), getFn()) + } + s.AD = i + return nil +} + +// ImportDocs for importing documents +func (s *Storage) ImportDocs(path string) error { + impf, err := ioutil.ReadFile(path) if err != nil { return wrapErr(err, getFn()) } - wrkDir := path.Dir(i.Path) + var dataArray []interface{} + var counter int + var data interface{} + + data = nil + dec := yaml.NewDecoder(bytes.NewReader(impf)) + for { + dataArray = append(dataArray, data) + err := dec.Decode(&dataArray[counter]) + if err == nil { + counter++ + data = nil + continue + } + + if err.Error() == "EOF" { + break + } + return wrapErr(err, getFn()) + } + + for _, j := range dataArray { + if j == nil { + continue + } + if len(j.(map[interface{}]interface{})) == 0 { + continue + } + s.Data = append(s.Data, j) + } + return s.stateReload() +} + +// Read for reading the local yaml file and importing it +// in memory +func (s *Storage) Read() error { + f, err := ioutil.ReadFile(s.Path) + if err != nil { + return wrapErr(err, getFn()) + } + + s.Lock() + defer s.Unlock() + + s.Data = nil + s.Data = make([]interface{}, 0) + + var counter int + var data interface{} + dec := yaml.NewDecoder(bytes.NewReader(f)) + for { + s.Data = append(s.Data, data) + err := dec.Decode(&s.Data[counter]) + if err == nil { + counter++ + data = nil + continue + } + + if err.Error() == "EOF" { + break + } + return wrapErr(err, getFn()) + } + + return nil +} + +// Write for writing memory content to the local yaml file +func (s *Storage) Write() error { + s.Lock() + 
defer s.Unlock() + + wrkDir := path.Dir(s.Path) f, err := ioutil.TempFile(wrkDir, ".tx.*") if err != nil { return wrapErr(err, getFn()) } - _, err = f.Write(data) + var buf bytes.Buffer + enc := yaml.NewEncoder(&buf) + + for _, j := range s.Data { + if j == nil { + continue + } else if v, ok := j.(map[interface{}]interface{}); ok && len(v) == 0 { + continue + } + + err := enc.Encode(j) + if err != nil { + return wrapErr(err, getFn()) + } + } + + _, err = f.Write(buf.Bytes()) if err != nil { return wrapErr(err, getFn()) } @@ -91,13 +255,14 @@ func (i *Storage) Write() error { return wrapErr(err, getFn()) } - return wrapErr(os.Rename(f.Name(), i.Path), getFn()) + return wrapErr(os.Rename(f.Name(), s.Path), getFn()) } -func (i *Storage) stateReload() error { - err := i.Write() +func (s *Storage) stateReload() error { + err := s.Write() if err != nil { return wrapErr(err, getFn()) } - return wrapErr(i.Read(), getFn()) + + return wrapErr(s.Read(), getFn()) } diff --git a/db/wrappers.go b/db/wrappers.go index 9b11578..6b61f32 100644 --- a/db/wrappers.go +++ b/db/wrappers.go @@ -1,12 +1,13 @@ package db import ( + "fmt" "strings" ) // Upsert is a SQL wrapper for adding/updating map structures func (s *Storage) Upsert(k string, i interface{}) error { - err := s.SQL.upsertRecursive(strings.Split(k, "."), s.Data, i) + err := s.SQL.upsertRecursive(strings.Split(k, "."), s.Data[s.AD], i) if err != nil { return wrapErr(err, getFn()) } @@ -14,11 +15,51 @@ func (s *Storage) Upsert(k string, i interface{}) error { return s.stateReload() } +// UpsertGlobal is a SQL wrapper for adding/updating map structures +// in all documents. 
This will change all existing paths to the given +// structure and add new if the path is missing for a document +func (s *Storage) UpsertGlobal(k string, i interface{}) error { + c := s.AD + for j := range s.Data { + err := s.SQL.upsertRecursive(strings.Split(k, "."), s.Data[j], i) + if err != nil { + return wrapErr(err, getFn()) + } + } + + s.AD = c + + return s.stateReload() +} + +// UpdateGlobal is a SQL wrapper for adding/updating map structures +// in all documents. This will change all existing paths to the given +// structure and add new if the path is missing for a document +func (s *Storage) UpdateGlobal(k string, i interface{}) error { + c := s.AD + for j := range s.Data { + s.AD = j + + if _, err := s.GetPath(k); err != nil { + continue + } + + err := s.SQL.upsertRecursive(strings.Split(k, "."), s.Data[s.AD], i) + if err != nil { + return wrapErr(err, getFn()) + } + } + + s.AD = c + + return s.stateReload() +} + // GetFirst is a SQL wrapper for finding the first key in the // yaml hierarchy. If two keys are on the same level but under // different paths, then the selection will be random func (s *Storage) GetFirst(k string) (interface{}, error) { - obj, err := s.SQL.getFirst(k, s.Data) + obj, err := s.SQL.getFirst(k, s.Data[s.AD]) if err != nil { return nil, wrapErr(err, getFn()) } @@ -38,7 +79,21 @@ func (s *Storage) GetFirst(k string) (interface{}, error) { // test: someValue-2 // func (s *Storage) Get(k string) ([]string, error) { - obj, err := s.SQL.get(k, s.Data) + fmt.Println("Warn: Deprecated is Get(). Will be replaced by FindKeys() in the future.") + obj, err := s.SQL.get(k, s.Data[s.AD]) + if err != nil { + return nil, wrapErr(err, getFn()) + } + + return obj, nil +} + +// FindKeys is alias of Get. 
This function will replace +// Get in the future since this name for finding keys +// makes more sense +// For now we keep both for compatibility +func (s *Storage) FindKeys(k string) ([]string, error) { + obj, err := s.SQL.get(k, s.Data[s.AD]) if err != nil { return nil, wrapErr(err, getFn()) } @@ -55,7 +110,7 @@ func (s *Storage) Get(k string) ([]string, error) { // func (s *Storage) GetPath(k string) (interface{}, error) { keys := strings.Split(k, ".") - obj, err := s.SQL.getPath(keys, s.Data) + obj, err := s.SQL.getPath(keys, s.Data[s.AD]) if err != nil { return nil, wrapErr(err, getFn()) } @@ -68,7 +123,7 @@ func (s *Storage) GetPath(k string) (interface{}, error) { // validate that the path exists, then it would export the value of // GetPath("key-1.key-2") and delete the object that matches key-3 func (s *Storage) Delete(k string) error { - err := s.SQL.delPath(k, s.Data) + err := s.SQL.delPath(k, s.Data[s.AD]) if err != nil { return wrapErr(err, getFn()) } @@ -79,7 +134,7 @@ func (s *Storage) Delete(k string) error { // MergeDBs is a SQL wrapper that merges a source yaml file // with the DBy local yaml file. 
func (s *Storage) MergeDBs(path string) error { - err := s.SQL.mergeDBs(path, s.Data) + err := s.SQL.mergeDBs(path, s.Data[s.AD]) if err != nil { return wrapErr(err, getFn()) } diff --git a/docs/examples/kubernetes-labels-update.md b/docs/examples/kubernetes-labels-update.md new file mode 100644 index 0000000..d247b4e --- /dev/null +++ b/docs/examples/kubernetes-labels-update.md @@ -0,0 +1,92 @@ +### Update labels for all kubernetes manifests + +In the **manifests directory** we have a **deployment.yaml** that we will import and update + +In this example we will import the manifest and update the version for all documents from **v0.2.0** to **v0.3.0** + +```go + +package main + +import ( + "fmt" + + "github.com/sirupsen/logrus" + "github.com/ulfox/dby/db" +) + +func main() { + logger := logrus.New() + state, err := db.NewStorageFactory("local/db.yaml") + if err != nil { + logger.Fatal(err) + } + err = state.ImportDocs("docs/examples/manifests/deployment.yaml") + if err != nil { + logger.Fatal(err) + } + + // Automatically update all document names based on "kind/metadata.name" values + state.SetNames("kind", "metadata.name") + + // Set the paths we want to update + paths := []string{ + "spec.selector.matchLabels.version", + "metadata.labels.version", + "spec.selector.version", + "spec.template.selector.matchLabels.version", + "spec.template.metadata.labels.version", + } + + // UpdateGlobal is a global command that updates all fields + // that match the given path. Documents that do not have the + // specific path will not be updated + // + // If we wanted to update or create the path then we could issue + // UpsertGlobal() instead. 
Using that command however for Kubernetes + // manifests is not recommended since you may end up having + // manifests with fields that are not supported by the resource API + for _, j := range paths { + err = state.UpdateGlobal( + j, + "v0.3.0", + ) + if err != nil { + logger.Fatal(err) + } + + } + + // List Docs by name + for _, j := range state.ListDocs() { + // Switch to a doc by name + err = state.SwitchDoc(j) + if err != nil { + logger.Fatal(err) + } + + // Get the metadata + val, err := state.GetPath("metadata.labels.version") + if err != nil { + // We use continue here because HorizontalPodAutoscaler does not have labels set + // so no update was done and no path exists for us to get + continue + } + logger.Infof("%s has version: %s", j, val) + } +} + +``` + + +Example output + + +```bash +INFO[0000] poddisruptionbudget/caller-svc has version: v0.3.0 +INFO[0000] deployment/listener-svc has version: v0.3.0 +INFO[0000] service/listener-svc has version: v0.3.0 +INFO[0000] poddisruptionbudget/listener-svc has version: v0.3.0 +INFO[0000] deployment/caller-svc has version: v0.3.0 +INFO[0000] service/caller-svc has version: v0.3.0 +``` \ No newline at end of file diff --git a/docs/examples/manifests/deployment.yaml b/docs/examples/manifests/deployment.yaml new file mode 100644 index 0000000..ed2cd22 --- /dev/null +++ b/docs/examples/manifests/deployment.yaml @@ -0,0 +1,213 @@ +--- +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: listener-svc + namespace: echoserver +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: listener-svc + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 30 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: listener-svc + namespace: echoserver + # annotations: + # sidecar.istio.io/extraStatTags: destination_port,request_host + labels: + app: listener-svc + version: v0.1.1 +spec: + 
strategy: + rollingUpdate: + maxSurge: "100%" + maxUnavailable: 3 + type: "RollingUpdate" + replicas: 1 + selector: + matchLabels: + app: listener-svc + version: v0.1.1 + template: + metadata: + # annotations: + # sidecar.istio.io/extraStatTags: destination_port,request_host + labels: + app: listener-svc + version: v0.1.1 + spec: + containers: + - image: gcr.io/google_containers/echoserver:1.9 + imagePullPolicy: Always + name: listener-svc + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: listener-svc + namespace: echoserver + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '15090' + prometheus.io/path: '/stats/prometheus' + labels: + app: listener-svc + version: v0.1.1 +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: tcp-web + selector: + app: listener-svc + version: v0.1.1 +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: listener-svc + namespace: echoserver + labels: + app: listener-svc + version: v0.1.1 +spec: + maxUnavailable: 3 + selector: + matchLabels: + app: listener-svc + version: v0.1.1 +--- +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: caller-svc + namespace: sysdebug +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: caller-svc + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 30 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: caller-svc + namespace: sysdebug + # annotations: + # sidecar.istio.io/extraStatTags: destination_port,request_host + labels: + app: caller-svc + version: v0.2.0 +spec: + strategy: + rollingUpdate: + maxSurge: "100%" + maxUnavailable: 3 + type: "RollingUpdate" + replicas: 1 + selector: + matchLabels: + app: caller-svc + version: v0.2.0 + template: + metadata: + # annotations: + # sidecar.istio.io/extraStatTags: destination_port,request_host + labels: + app: 
caller-svc + version: v0.2.0 + spec: + # serviceAccount: sysdebug + containers: + - name: caller-svc + ports: + - containerPort: 8080 + image: gcr.io/google_containers/echoserver:1.9 + imagePullPolicy: IfNotPresent + readinessProbe: + timeoutSeconds: 7 + exec: + command: + - curl + - -sS + - --fail + - --connect-timeout + - "5" + - -o + - /dev/null + - listener-svc.echoserver.svc.primef.org + livenessProbe: + timeoutSeconds: 7 + exec: + command: + - curl + - -sS + - --fail + - --connect-timeout + - "5" + - -o + - /dev/null + - listener-svc.echoserver.svc.primef.org +--- +apiVersion: v1 +kind: Service +metadata: + name: caller-svc + namespace: sysdebug + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '15090' + prometheus.io/path: '/stats/prometheus' + labels: + app: caller-svc + version: v0.2.0 +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: tcp-web + type: ClusterIP + selector: + app: caller-svc + version: v0.2.0 + +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: caller-svc + namespace: sysdebug + labels: + app: caller-svc + version: v0.2.0 +spec: + maxUnavailable: 3 + selector: + matchLabels: + app: caller-svc + version: v0.2.0 diff --git a/sql_test.go b/sql_test.go index 27fb945..1493f09 100644 --- a/sql_test.go +++ b/sql_test.go @@ -75,7 +75,7 @@ func TestUpsert(t *testing.T) { f, err := ioutil.ReadFile(path) assert.Equal(t, err, nil) - yaml.Unmarshal(f, &state.Data) + yaml.Unmarshal(f, &state.Data[state.AD]) testUpsert := []struct { Key string @@ -85,7 +85,7 @@ func TestUpsert(t *testing.T) { {"key-2", "value-2"}, } - data, ok := state.Data.(map[interface{}]interface{}) + data, ok := state.Data[state.AD].(map[interface{}]interface{}) assert.Equal(t, ok, true) for _, testCase := range testUpsert { @@ -366,7 +366,7 @@ func TestGet(t *testing.T) { assert.Equal(t, err, nil) assertData := db.NewConvertFactory() - assertData.Input(state.Data) + assertData.Input(state.Data[state.AD]) assertData. 
Key("path-1").