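// Package lockfile reads and writes the shoebill lockfile, which pins the
// chart, version, repository, and git commit for every managed release.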
package lockfile
import (
	"fmt"
	"os"

	"git.badhouseplants.net/allanger/shoebill/pkg/repository"
	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)
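// LOCKFILE_NAME is the file name of the lockfile inside the directory passed
// to NewFromFile and WriteToFile.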
const LOCKFILE_NAME = "shoebill.lock.yaml"
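// LockEntry pins a single release: its chart, version, namespace, the chart
// repository it comes from, the git commit it was generated from, and the
// values and secrets associated with it.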
type LockEntry struct {
	Chart     string
	Release   string
	Version   string
	Namespace string
	RepoUrl   string
	RepoName  string
	GitCommit string
	Values    map[string]string
	Secrets   map[string]string
}
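// HashPerRelease holds the git commit hash recorded for a release in a namespace.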
type HashPerRelease struct {
	Release    string
	Namespace  string
	CommitHash string
}
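// HashesPerReleases is a list of per-release commit hashes.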
type HashesPerReleases []*HashPerRelease
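// LockRepository describes a chart repository as stored in the lockfile.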
type LockRepository struct {
	URL  string
	Name string
}
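// LockFile is the set of lock entries, one per release.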
type LockFile []*LockEntry
// NewFromFile initializes a LockFile by reading and unmarshalling the YAML
// lockfile from the given directory.
func NewFromFile(lockfileDirPath string) (LockFile, error) {
	var lockEntries LockFile
	lockfilePath := fmt.Sprintf("%s/%s", lockfileDirPath, LOCKFILE_NAME)

	logrus.Infof("reading the lockfile: %s", lockfilePath)

	lockFileData, err := os.ReadFile(lockfilePath)
	if err != nil {
		return nil, err
	}

	if err := yaml.Unmarshal(lockFileData, &lockEntries); err != nil {
		return nil, err
	}

	return lockEntries, nil
}
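// ReposFromLockfile builds the list of chart repositories referenced by the
// lock entries, deduplicated by name, and resolves each repository's kind
// from its URL.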
func (lockfile LockFile) ReposFromLockfile() (repository.Repositories, error) {
	repositories := repository.Repositories{}
	for _, lockentry := range lockfile {
		newRepoEntry := &repository.Repository{
			URL:  lockentry.RepoUrl,
			Name: lockentry.RepoName,
		}
		repositories = append(repositories, newRepoEntry)
	}

	// The lockfile contains one entry per release, so the same repository may
	// appear several times. Deduplicate by repository name.
	allKeys := make(map[string]bool)
	dedupedRepositories := repository.Repositories{}

	for _, repo := range repositories {
		if _, exists := allKeys[repo.Name]; !exists {
			allKeys[repo.Name] = true
			dedupedRepositories = append(dedupedRepositories, repo)
		}
	}

	for _, repoEntry := range dedupedRepositories {
		if err := repoEntry.KindFromUrl(); err != nil {
			return nil, err
		}
	}
	return dedupedRepositories, nil
}
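// AddHashes sets the GitCommit field of each lock entry to the commit hash
// recorded for the matching release and namespace.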
func (lf LockFile) AddHashes(hashes HashesPerReleases) {
	for _, lockEntry := range lf {
		for _, hash := range hashes {
			if lockEntry.Namespace == hash.Namespace && lockEntry.Release == hash.Release {
				lockEntry.GitCommit = hash.CommitHash
			}
		}
	}
}
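// WriteToFile marshals the lockfile to YAML and writes it to the lockfile in
// the given directory.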
func (lf LockFile) WriteToFile(dir string) error {
	lockfilePath := fmt.Sprintf("%s/%s", dir, LOCKFILE_NAME)
	lockfileContent, err := yaml.Marshal(lf)
	if err != nil {
		return err
	}
	// Write the rendered lockfile with standard file permissions.
	if err := os.WriteFile(lockfilePath, lockfileContent, 0o644); err != nil {
		return err
	}
	return nil
}