A lot of things are going on
All checks were successful
ci/woodpecker/push/build Pipeline was successful

Signed-off-by: Nikolai Rodionov <allanger@badhouseplants.net>
This commit is contained in:
2026-05-10 23:14:29 +02:00
parent 16fab87949
commit 33f48f2bfb
31 changed files with 72 additions and 1514 deletions

View File

@@ -8,7 +8,7 @@ import (
"time"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/hash"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/tools/logger"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/logger"
"github.com/golang-jwt/jwt/v5"
"github.com/google/uuid"
"github.com/redis/go-redis/v9"

View File

@@ -1,420 +0,0 @@
package controllers
import (
"bytes"
"context"
b64 "encoding/base64"
"fmt"
"os"
"strings"
"text/template"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/consts"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/helm"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/kube"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/types/helmrelease"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/google/uuid"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
)
// Application glues together everything needed to manage one user
// application: a controller-runtime manager, the owning user's ID, the
// application payload and the user's bearer token.
type Application struct {
	Controller ctrl.Manager // manager whose client talks to the cluster
	UserID     string       // also used as the target namespace name (see Create/Delete)
	Data       *ApplicationData
	Token      string // user-scoped bearer token for the Kubernetes API
}

// ApplicationData describes a single application instance to deploy.
type ApplicationData struct {
	UUID        string // generated in Create
	Name        string // Helm release name
	Description string
	Application string // Helm chart name
	Version     string // Helm chart version
	Environemnt string // NOTE(review): misspelling kept — this field name is part of the API
	Config      map[string]string // "pretty" config entries keyed by schema key
	RawConfig   string            // raw values used when no pretty config is given
}
// Create provisions a new application for the user: it pulls the requested
// Helm chart, renders the "pretty" config schema (when the chart ships a
// config.yaml) or falls back to the raw config, and stores the resulting
// values.yaml in a Secret inside the user's namespace, using a token that
// belongs to the user.
func (app *Application) Create(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	app.Data.UUID = uuid.New().String()
	// Authenticate as the user so RBAC is enforced per user.
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: app.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	controller, err := ctrl.NewManager(conf, ctrl.Options{})
	if err != nil {
		return err
	}
	helmEntry := helm.NewHelm()
	// TODO: It should be possible to use other repos
	release := &helm.ReleaseData{
		Name:           app.Data.Name,
		Chart:          app.Data.Application,
		Version:        app.Data.Version,
		RepositoryURL:  "oci://registry.badhouseplants.net/softplayer/helm",
		RepositoryKind: "oci",
		RepositoryName: "softplayer",
	}
	// Derive a deterministic name for the Secret from the application
	// coordinates.
	// NOTE(review): app.Data.Name is concatenated twice; one occurrence was
	// probably meant to be another field — confirm before changing, since
	// the result is part of the Secret's identity.
	formattedName := strings.ToLower(
		b64.StdEncoding.EncodeToString(
			[]byte(app.Data.Application + app.Data.Name + app.Data.Name + app.Data.Environemnt),
		),
	)
	// Truncate to 20 characters, guarding against short input instead of
	// panicking with an out-of-range slice.
	if len(formattedName) > 20 {
		formattedName = formattedName[:20]
	}
	goPath := os.TempDir() + "/softplayer/" + formattedName
	if err := os.MkdirAll(goPath, 0o777); err != nil {
		return err
	}
	path, err := helmEntry.PullChart(goPath, release)
	if err != nil {
		log.Error(err, "Couldn't pull the chart")
		return consts.ErrSystemError
	}
	// A chart "supports" pretty config when it ships a config.yaml schema.
	prettyCfgSupport := true
	cfgSchema := map[string]*helmrelease.PrettyConfigSchema{}
	cfgSchemaRaw, err := os.ReadFile(fmt.Sprintf("%s/%s/config.yaml", goPath, path))
	if err != nil {
		log.Error(err, "Couldn't find the config file")
		prettyCfgSupport = false
	} else {
		if err := yaml.Unmarshal(cfgSchemaRaw, cfgSchema); err != nil {
			log.Error(err, "Couldn't parse the pretty config")
			return err
		}
	}
	cfg := &helmrelease.HelmRelease{
		Helm: helmrelease.Helm{
			Release: app.Data.Name,
			Chart: helmrelease.Chart{
				Name:    app.Data.Application,
				Version: app.Data.Version,
			},
			Repo: helmrelease.Repo{
				URL:  release.RepositoryURL,
				Type: release.RepositoryKind,
			},
		},
		Config: helmrelease.Config{},
	}
	if len(app.Data.Config) > 0 && prettyCfgSupport {
		// Render each pretty-config entry as a text/template against the
		// application data and map it onto the path from the schema.
		for key, val := range app.Data.Config {
			value, ok := cfgSchema[key]
			if !ok {
				return fmt.Errorf("unsupported config entry: %s", key)
			}
			tmpl, err := template.New("prettyconfig").Parse(val)
			if err != nil {
				log.Error(err, "Couldn't build a template for prettyconfig")
				return consts.ErrSystemError
			}
			var tmplRes bytes.Buffer
			if err := tmpl.Execute(&tmplRes, app.Data); err != nil {
				log.Error(err, "Couldn't execute the prettyconfig template")
				return consts.ErrSystemError
			}
			cfg.Config.Pretty = append(cfg.Config.Pretty, helmrelease.PrettyConfig{
				Key:   key,
				Path:  value.Path,
				Value: tmplRes.String(),
			})
		}
	} else if len(app.Data.RawConfig) > 0 {
		cfg.Config.Raw = app.Data.RawConfig
	}
	cfgYaml, err := yaml.Marshal(cfg)
	if err != nil {
		log.Error(err, "Couldn't marshal the config into yaml")
		return consts.ErrSystemError
	}
	appSecret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      formattedName,
			Namespace: app.UserID,
			Labels: map[string]string{
				"component":   "install",
				"kind":        "action",
				"environment": app.Data.Environemnt,
				"uuid":        app.Data.UUID,
			},
		},
		StringData: map[string]string{
			"values.yaml": string(cfgYaml),
		},
	}
	if err := kube.Create(ctx, controller.GetClient(), &appSecret, false); err != nil {
		log.Error(err, "Couldn't create the application secret")
		return consts.ErrSystemError
	}
	return nil
}
// Delete removes every ConfigMap labelled with the application's UUID from
// the user's namespace, authenticating with the user's own token.
//
// NOTE(review): Create stores the rendered values in a Secret, while Delete
// removes ConfigMaps — confirm the asymmetry is intentional (an operator
// may translate the Secret into ConfigMaps elsewhere).
func (app *Application) Delete(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: app.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	clientset, err := kubernetes.NewForConfig(conf)
	if err != nil {
		log.Error(err, "Couldn't create a new clientset")
		return consts.ErrSystemError
	}
	configmaps, err := clientset.CoreV1().ConfigMaps(app.UserID).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("uuid=%s", app.Data.UUID)})
	if err != nil {
		log.Error(err, "Couldn't list configmaps")
		return consts.ErrSystemError
	}
	for _, cm := range configmaps.Items {
		if err := clientset.CoreV1().ConfigMaps(app.UserID).Delete(ctx, cm.GetName(), *metav1.NewDeleteOptions(100)); err != nil {
			log.Error(err, "Couldn't remove configmap", "name", cm.GetName(), "namespace", cm.GetNamespace())
			return consts.ErrSystemError
		}
	}
	return nil
}
// func (env *Environemnt) Update(ctx context.Context) error {
// if err := env.isNsVerified(ctx); err != nil {
// log.Println("Can't verify ns")
// return err
// }
// env.Controller.GetClient()
// conf := &rest.Config{
// Host: "https://kubernetes.default.svc.cluster.local:443",
// BearerToken: env.Token,
// TLSClientConfig: rest.TLSClientConfig{
// Insecure: true,
// },
// }
// controller, err := ctrl.NewManager(conf, ctrl.Options{})
// if err != nil {
// return err
// }
// oldEnv := &Environemnt{
// Controller: env.Controller,
// UserID: env.UserID,
// Token: env.Token,
// Data: &ApplicationData{
// UUID: env.Data.UUID,
// },
// }
// if err := oldEnv.Get(ctx); err != nil {
// return err
// }
// // Check whether immutable fields are changed
// if oldEnv.Data.Provider != env.Data.Provider {
// return errors.New("provider can't be changed")
// }
// if oldEnv.Data.Location != env.Data.Location {
// return errors.New("location can't be changed")
// }
// vars, err := env.Data.buildVars()
// if err != nil {
// return err
// }
// obj := corev1.ConfigMap{
// ObjectMeta: metav1.ObjectMeta{
// Name: env.Data.UUID,
// Namespace: env.UserID,
// Labels: map[string]string{
// "component": "bootstrap",
// "kind": "environment",
// },
// },
// Data: map[string]string{
// "name": env.Data.Name,
// "description": env.Data.Description,
// "vars": vars,
// },
// }
// if err := kube.Update(ctx, controller.GetClient(), &obj); err != nil {
// return err
// }
// return nil
// }
// func (*Environemnt) Delete(ctx context.Context) error {
// env.Controller.GetClient()
// conf := &rest.Config{
// Host: "https://kubernetes.default.svc.cluster.local:443",
// BearerToken: env.Token,
// TLSClientConfig: rest.TLSClientConfig{
// Insecure: true,
// },
// }
// controller, err := ctrl.NewManager(conf, ctrl.Options{})
// if err != nil {
// return err
// }
// obj := corev1.ConfigMap{
// ObjectMeta: metav1.ObjectMeta{
// Name: env.Data.UUID,
// Namespace: env.UserID,
// Labels: map[string]string{
// "component": "bootstrap",
// },
// },
// }
// if err := kube.Delete(ctx, controller.GetClient(), &obj, false); err != nil {
// return err
// }
// return nil
// }
// func (env *Environemnt) ListEnvs(ctx context.Context) ([]*Environemnt, error) {
// env.Controller.GetClient()
// conf := &rest.Config{
// Host: "https://kubernetes.default.svc.cluster.local:443",
// BearerToken: env.Token,
// TLSClientConfig: rest.TLSClientConfig{
// Insecure: true,
// },
// }
// clientset, err := kubernetes.NewForConfig(conf)
// if err != nil {
// return nil, err
// }
// configmaps, err := clientset.CoreV1().ConfigMaps(env.UserID).List(ctx, metav1.ListOptions{LabelSelector: "kind=environment"})
// if err != nil {
// return nil, err
// }
// result := []*Environemnt{}
// for _, cm := range configmaps.Items {
// i := &Environemnt{}
// data := &ApplicationData{
// UUID: cm.GetName(),
// }
// i.Token = env.Token
// i.UserID = env.UserID
// i.Data = data
// i.Controller = env.Controller
// if err := i.Get(ctx); err != nil {
// return nil, err
// }
// result = append(result, i)
// }
// return result, nil
// }
// func (env *Environemnt) Get(ctx context.Context) error {
// env.Controller.GetClient()
// conf := &rest.Config{
// Host: "https://kubernetes.default.svc.cluster.local:443",
// BearerToken: env.Token,
// TLSClientConfig: rest.TLSClientConfig{
// Insecure: true,
// },
// }
// clientset, err := kubernetes.NewForConfig(conf)
// if err != nil {
// return err
// }
// envData, err := clientset.CoreV1().ConfigMaps(env.UserID).Get(ctx, env.Data.UUID, metav1.GetOptions{})
// if err != nil {
// return err
// }
// res, err := godotenv.Unmarshal(envData.Data["vars"])
// if err != nil {
// return err
// }
// if val, ok := envData.Data["name"]; ok {
// env.Data.Name = val
// } else {
// env.Data.Name = ""
// }
// if val, ok := envData.Data["description"]; ok {
// env.Data.Description = val
// } else {
// env.Data.Description = ""
// }
// if val, ok := res["SP_PROVIDER"]; ok {
// env.Data.Provider = val
// } else {
// env.Data.Provider = ""
// }
// if val, ok := res["SP_KUBERNETES"]; ok {
// env.Data.Kubernetes = val
// } else {
// env.Data.Kubernetes = ""
// }
// if val, ok := res["SP_SERVER_TYPE"]; ok {
// env.Data.ServerType = val
// } else {
// env.Data.ServerType = ""
// }
// if val, ok := res["SP_SERVER_LOCATION"]; ok {
// env.Data.Location = val
// } else {
// env.Data.Location = ""
// }
// return nil
// }

View File

@@ -0,0 +1,184 @@
package controllers
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/golang-jwt/jwt/v5"
"github.com/google/uuid"
"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth"
"github.com/redis/go-redis/v9"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TokenType distinguishes the two JWT flavours issued by the service.
type TokenType string

const (
	// TokenTypeAccess marks tokens used for regular API calls.
	TokenTypeAccess TokenType = "access"
	// TokenTypeRefresh marks tokens used only to obtain new tokens.
	TokenTypeRefresh TokenType = "refresh"
)

var (
	// ErrUnknownTokenType is returned when a token type is neither access nor refresh.
	ErrUnknownTokenType = errors.New("token type unknown")
	// ErrInvalidToken is returned when a parsed JWT fails validation.
	ErrInvalidToken = errors.New("invalid token")
)

// Claims is the JWT payload carried by every token issued here.
type Claims struct {
	UserID    string    `json:"user_id"`
	TokenID   string    `json:"token_id"`
	TokenType TokenType `json:"token_type"`
	jwt.RegisteredClaims
}

// AuthController issues and parses JWTs and stores sessions in redis.
type AuthController struct {
	jwtSecret  []byte        // HMAC secret used to sign tokens (HS256, see GenerateToken)
	accessTTL  time.Duration // lifetime of access tokens
	refreshTTL time.Duration // lifetime of refresh tokens and redis sessions
	redis      *redis.Client // session store
}

// contextKey is a private type so context values can't collide with other packages.
type contextKey string

// claimsContextKey is the context key under which parsed claims are stored.
const claimsContextKey contextKey = "jwt_claims"
// NewAuthController builds an AuthController from the JWT signing secret,
// the access/refresh token lifetimes and the redis client used for
// session storage.
func NewAuthController(jwtSecret []byte, accessTTL, refreshTTL time.Duration, redis *redis.Client) *AuthController {
	ctrl := new(AuthController)
	ctrl.jwtSecret = jwtSecret
	ctrl.accessTTL = accessTTL
	ctrl.refreshTTL = refreshTTL
	ctrl.redis = redis
	return ctrl
}
// WithClaims returns a copy of ctx carrying claims under the package's
// private context key; read them back with ClaimsFromContext.
func (a *AuthController) WithClaims(ctx context.Context, claims *Claims) context.Context {
	return context.WithValue(ctx, claimsContextKey, claims)
}
// ClaimsFromContext returns the *Claims previously stored in ctx via
// WithClaims, or an error when none are present.
func (a *AuthController) ClaimsFromContext(ctx context.Context) (*Claims, error) {
	value := ctx.Value(claimsContextKey)
	if c, ok := value.(*Claims); ok && c != nil {
		return c, nil
	}
	return nil, errors.New("claims not found in context")
}
// AuthInterceptorFN authenticates a gRPC call: it extracts the bearer
// token from the metadata, parses it, rejects refresh tokens on any method
// other than RefreshToken, and stores the claims in the returned context.
func (a *AuthController) AuthInterceptorFN(ctx context.Context) (context.Context, error) {
	tokenString, err := auth.AuthFromMD(ctx, "bearer")
	if err != nil {
		return nil, err
	}
	claims, parseErr := a.ParseToken(tokenString)
	if parseErr != nil {
		return nil, status.Error(codes.Unauthenticated, "Invalid JWT token")
	}
	// Refresh tokens may only be used to call the RefreshToken method.
	method, known := grpc.Method(ctx)
	if known && claims.TokenType == TokenTypeRefresh && !strings.Contains(method, "RefreshToken") {
		return nil, status.Error(codes.Unauthenticated, "Refresh token is not allowed for this method")
	}
	return a.WithClaims(ctx, claims), nil
}
// GenerateToken issues a signed HS256 JWT of the given type for userID and
// returns the signed token together with the freshly generated token ID.
// Unknown token types yield ErrUnknownTokenType.
func (a *AuthController) GenerateToken(userID string, tokenType TokenType) (token, tokenID string, err error) {
	var expiresAt time.Time
	notBefore := time.Now()
	// The default branch rejects every unknown type; the original repeated
	// this check after the switch, unreachably — removed.
	switch tokenType {
	case TokenTypeAccess:
		expiresAt = time.Now().Add(a.accessTTL)
	case TokenTypeRefresh:
		expiresAt = time.Now().Add(a.refreshTTL)
	default:
		return "", "", ErrUnknownTokenType
	}
	tokenID = uuid.New().String()
	claims := Claims{
		UserID:    userID,
		TokenID:   tokenID,
		TokenType: tokenType,
		RegisteredClaims: jwt.RegisteredClaims{
			Issuer:    "",
			Subject:   "",
			Audience:  jwt.ClaimStrings{},
			ExpiresAt: jwt.NewNumericDate(expiresAt),
			NotBefore: jwt.NewNumericDate(notBefore),
			IssuedAt:  jwt.NewNumericDate(time.Now()),
			// NOTE(review): the registered "jti" claim is set to the user ID
			// even though a fresh tokenID exists — confirm this wasn't meant
			// to be tokenID before changing.
			ID: userID,
		},
	}
	tokenJwt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	token, err = tokenJwt.SignedString(a.jwtSecret)
	if err != nil {
		return "", "", err
	}
	return
}
// ParseToken validates tokenStr against the controller's secret and
// returns its claims, or ErrInvalidToken / a parse error on failure.
func (a *AuthController) ParseToken(tokenStr string) (*Claims, error) {
	token, err := jwt.ParseWithClaims(
		tokenStr,
		&Claims{},
		func(token *jwt.Token) (interface{}, error) {
			return a.jwtSecret, nil
		},
		// Tokens are signed with HS256 (see GenerateToken); restricting the
		// accepted algorithms prevents signing-method confusion attacks.
		jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Alg()}),
	)
	if err != nil {
		return nil, err
	}
	claims, ok := token.Claims.(*Claims)
	if !ok || !token.Valid {
		return nil, ErrInvalidToken
	}
	return claims, nil
}
// Session is the payload persisted in redis for a refresh token.
type Session struct {
	UserID string `json:"user_id"`
}

// redisSessionKey builds the redis key under which a session is stored.
func redisSessionKey(input string) string {
	return fmt.Sprint("session:", input)
}
// SaveSession serialises the session as JSON and stores it in redis under
// the token's key, expiring after the refresh-token TTL.
func (a *AuthController) SaveSession(ctx context.Context, tokenID string, session *Session) error {
	payload, err := json.Marshal(session)
	if err != nil {
		return err
	}
	return a.redis.Set(ctx, redisSessionKey(tokenID), string(payload), a.refreshTTL).Err()
}
// GetSession loads and invalidates (deletes) the session stored under
// tokenID — a refresh token may only be redeemed once.
func (a *AuthController) GetSession(ctx context.Context, tokenID string) (*Session, error) {
	key := redisSessionKey(tokenID)
	// Check the lookup error (including redis.Nil for a missing or expired
	// session) instead of silently unmarshalling an empty string and
	// failing later with a confusing JSON error.
	sessionRaw, err := a.redis.Get(ctx, key).Result()
	if err != nil {
		return nil, err
	}
	if err := a.redis.Del(ctx, key).Err(); err != nil {
		return nil, err
	}
	session := &Session{}
	if err := json.Unmarshal([]byte(sessionRaw), session); err != nil {
		return nil, err
	}
	return session, nil
}

View File

@@ -0,0 +1,57 @@
package controllers_test
import (
"testing"
"time"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/controllers"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
var (
	// Short TTLs keep the suite fast while still distinguishing access
	// tokens from refresh tokens in the expiry assertions below.
	testAccessTTL  = time.Second * 5
	testRefreshTTL = time.Second * 20
	// testUserID is one random user ID shared by every test in this file.
	testUserID = uuid.New().String()
)
// TestGenerateInvalidTokenType checks that GenerateToken rejects unknown
// token types with ErrUnknownTokenType and returns an empty token.
func TestGenerateInvalidTokenType(t *testing.T) {
	authCtrl := controllers.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
	token, _, err := authCtrl.GenerateToken(testUserID, "invalid_type")
	assert.Equal(t, "", token)
	// assert.ErrorIs takes (t, err, target); the original had the last two
	// arguments swapped.
	assert.ErrorIs(t, err, controllers.ErrUnknownTokenType)
}
// TestGenerateValidateAccessToken round-trips an access token through
// GenerateToken and ParseToken and checks the resulting claims.
func TestGenerateValidateAccessToken(t *testing.T) {
	authCtrl := controllers.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
	now := time.Now()
	token, _, err := authCtrl.GenerateToken(testUserID, controllers.TokenTypeAccess)
	assert.NoError(t, err)
	assert.NotEmpty(t, token)
	claims, err := authCtrl.ParseToken(token)
	assert.NoError(t, err)
	assert.Equal(t, testUserID, claims.UserID)
	assert.NotEmpty(t, claims.TokenID)
	assert.Equal(t, controllers.TokenTypeAccess, claims.TokenType)
	// Compare timestamps with a tolerance instead of exact Unix seconds:
	// the token is issued a moment after `now`, so exact equality is flaky
	// around second boundaries.
	assert.WithinDuration(t, now.Add(testAccessTTL), claims.ExpiresAt.Time, 2*time.Second)
	assert.WithinDuration(t, now, claims.IssuedAt.Time, 2*time.Second)
	assert.WithinDuration(t, now, claims.NotBefore.Time, 2*time.Second)
}
// TestGenerateValidateRefreshToken round-trips a refresh token through
// GenerateToken and ParseToken and checks the resulting claims.
func TestGenerateValidateRefreshToken(t *testing.T) {
	authCtrl := controllers.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
	now := time.Now()
	token, _, err := authCtrl.GenerateToken(testUserID, controllers.TokenTypeRefresh)
	assert.NoError(t, err)
	assert.NotEmpty(t, token)
	claims, err := authCtrl.ParseToken(token)
	assert.NoError(t, err)
	assert.Equal(t, testUserID, claims.UserID)
	assert.NotEmpty(t, claims.TokenID)
	assert.Equal(t, controllers.TokenTypeRefresh, claims.TokenType)
	// Tolerant comparison: exact Unix-second equality is flaky around
	// second boundaries (see TestGenerateValidateAccessToken).
	assert.WithinDuration(t, now.Add(testRefreshTTL), claims.ExpiresAt.Time, 2*time.Second)
	assert.WithinDuration(t, now, claims.IssuedAt.Time, 2*time.Second)
	assert.WithinDuration(t, now, claims.NotBefore.Time, 2*time.Second)
}

View File

@@ -1,127 +0,0 @@
package controllers
import (
"context"
"crypto/rand"
"errors"
"fmt"
"io"
"log"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/email"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/kube"
ctrl "sigs.k8s.io/controller-runtime"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// EmailSvc sends and confirms email verification codes for a user.
type EmailSvc struct {
	Controller  ctrl.Manager    // client used to read namespaces/secrets and write configmaps
	Data        EmailData
	EmailConfig email.EmailConf // mail settings used to deliver the code
	DevMode     bool            // when true, no real email is sent
}

// EmailData identifies the user being verified and carries the code.
type EmailData struct {
	UserID string // also the user's namespace name
	Code   string // set by SendVerification; supplied by the caller for ConfirmVerification
}
// SendVerification generates a 6-digit verification code, emails it to the
// address stored in the user's account secret (skipped in DevMode) and
// persists the code in an "email-verification-code" ConfigMap in the
// user's namespace so ConfirmVerification can check it later.
func (svc *EmailSvc) SendVerification(ctx context.Context) error {
	client := svc.Controller.GetClient()
	userns := &corev1.Namespace{}
	if err := client.Get(ctx, types.NamespacedName{
		Name: svc.Data.UserID,
	}, userns); err != nil {
		return err
	}
	userName, ok := userns.Labels["username"]
	if !ok {
		return errors.New("user not found")
	}
	accountData := &corev1.Secret{}
	if err := client.Get(ctx, types.NamespacedName{
		Namespace: "softplayer-accounts",
		Name:      userName,
	}, accountData); err != nil {
		return err
	}
	if val, ok := userns.Labels["email-verified"]; ok && val == "true" {
		return errors.New("email is already verified")
	}
	number := encodeToString(6)
	svc.Data.Code = number
	if !svc.DevMode {
		emailContent := "Subject: Softplayer verification code\r\n" + "\r\n" + fmt.Sprintf("Your verification code is %s", number)
		// Named `recipient` — the original `email` shadowed the imported
		// email package.
		recipient := string(accountData.Data["email"])
		if err := svc.EmailConfig.SendEmail(recipient, emailContent); err != nil {
			return err
		}
	}
	emailCode := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "email-verification-code",
			Namespace: svc.Data.UserID,
		},
		Data: map[string]string{
			"code": number,
		},
	}
	if err := kube.Create(ctx, client, &emailCode, true); err != nil {
		return err
	}
	return nil
}
// ConfirmVerification compares the code supplied by the user with the one
// stored in the "email-verification-code" ConfigMap; on a match it removes
// the ConfigMap and marks the user's namespace verified via the
// "email-verified" label.
func (svc *EmailSvc) ConfirmVerification(ctx context.Context) error {
	client := svc.Controller.GetClient()
	emailCode := &corev1.ConfigMap{}
	if err := client.Get(ctx, types.NamespacedName{
		Namespace: svc.Data.UserID,
		Name:      "email-verification-code",
	}, emailCode); err != nil {
		return err
	}
	if svc.Data.Code != emailCode.Data["code"] {
		// SECURITY: don't log the codes themselves — they are short-lived
		// secrets; record only the fact of a mismatch.
		log.Println("email verification code mismatch for user", svc.Data.UserID)
		return errors.New("wrong verification code")
	}
	if err := client.Delete(ctx, emailCode); err != nil {
		return err
	}
	userns := &corev1.Namespace{}
	if err := client.Get(ctx, types.NamespacedName{
		Name: svc.Data.UserID,
	}, userns); err != nil {
		return err
	}
	userns.Labels["email-verified"] = "true"
	if err := client.Update(ctx, userns); err != nil {
		return err
	}
	return nil
}
// encodeToString returns a cryptographically random numeric string of
// length max (e.g. a 6-digit email verification code).
func encodeToString(max int) string {
	b := make([]byte, max)
	// Check the error itself: on a short read ReadAtLeast always returns a
	// non-nil error, and inspecting err directly is clearer than comparing
	// the byte count.
	if _, err := io.ReadAtLeast(rand.Reader, b, max); err != nil {
		panic(err)
	}
	// Map each random byte onto the digit alphabet.
	for i := range b {
		b[i] = table[int(b[i])%len(table)]
	}
	return string(b)
}

// table holds the digit alphabet used for verification codes.
var table = [...]byte{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0'}

View File

@@ -1,413 +0,0 @@
package controllers
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/google/uuid"
"github.com/joho/godotenv"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/consts"
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/kube"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
)
// Environemnt (sic — the misspelling is part of the public API) wraps the
// data and clients needed to manage one user environment.
type Environemnt struct {
	Controller ctrl.Manager
	Config     *rest.Config // used by isNsVerified to build a clientset
	UserID     string       // also the user's namespace name
	Data       *EnvironemntData
	Token      string // user-scoped bearer token for the Kubernetes API
}
// EnvironemntData (sic) holds the user-facing environment settings.
type EnvironemntData struct {
	UUID        string
	Name        string
	Description string
	Provider    string
	Kubernetes  string
	Location    string
	ServerType  string
	DiskSize    int
}

// buildVars renders the environment as a dotenv document.
// Please make sure that the same variables are used by ansible.
func (e *EnvironemntData) buildVars() (string, error) {
	const tpl = `# -- Generated by the softplayer controller
SP_PROVIDER=%s
SP_KUBERNETES=%s
SP_SERVER_TYPE=%s
SP_SERVER_LOCATION=%s
SP_DISK_SIZE=%d`
	return fmt.Sprintf(tpl, e.Provider, e.Kubernetes, e.ServerType, e.Location, e.DiskSize), nil
}
// isNsVerified checks whether the user behind env.UserID has passed email
// verification: the user's namespace must carry the "email-verified"
// label set to anything other than "false".
func (env *Environemnt) isNsVerified(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	clientset, err := kubernetes.NewForConfig(env.Config)
	if err != nil {
		log.Error(err, "Couldn't create a new clientset")
		return consts.ErrSystemError
	}
	ns, err := clientset.CoreV1().Namespaces().Get(ctx, env.UserID, metav1.GetOptions{})
	if err != nil {
		log.Error(err, "Couldn't get a user's namespace")
		if k8serrors.IsNotFound(err) {
			err := errors.New("user not found by ID")
			return status.Error(codes.NotFound, err.Error())
		}
		return consts.ErrSystemError
	}
	val, ok := ns.GetLabels()["email-verified"]
	if !ok || val == "false" {
		return errors.New("user email is not verified, can't create a new env")
	}
	return nil
}
// Create stores a new environment as a ConfigMap in the user's namespace,
// using a token that belongs to the user, after making sure the user's
// email is verified.
func (env *Environemnt) Create(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	if err := env.isNsVerified(ctx); err != nil {
		return status.Error(codes.Unauthenticated, err.Error())
	}
	// Prepare a new ID for the environment
	env.Data.UUID = uuid.New().String()
	// Authenticate with the user's own token so RBAC applies per user.
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: env.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	controller, err := ctrl.NewManager(conf, ctrl.Options{})
	if err != nil {
		log.Error(err, "Couldn't init a controller")
		return consts.ErrSystemError
	}
	vars, err := env.Data.buildVars()
	if err != nil {
		log.Error(err, "Couldn't build the environment's dotenv file", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	obj := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      env.Data.UUID,
			Namespace: env.UserID,
			Labels: map[string]string{
				"component": "bootstrap",
				"kind":      "environment",
			},
		},
		Data: map[string]string{
			"name":        env.Data.Name,
			"description": env.Data.Description,
			"vars":        vars,
		},
	}
	if err := kube.Create(ctx, controller.GetClient(), &obj, false); err != nil {
		log.Error(err, "Couldn't create the environment's configmap", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	return nil
}
// Update rewrites an existing environment's ConfigMap after checking that
// the immutable fields (provider and location) are unchanged.
func (env *Environemnt) Update(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: env.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	controller, err := ctrl.NewManager(conf, ctrl.Options{})
	if err != nil {
		log.Error(err, "Couldn't init a controller")
		return consts.ErrSystemError
	}
	// Fetch the current state to validate the immutable fields against.
	oldEnv := &Environemnt{
		Controller: env.Controller,
		UserID:     env.UserID,
		Token:      env.Token,
		Data: &EnvironemntData{
			UUID: env.Data.UUID,
		},
	}
	if err := oldEnv.Get(ctx); err != nil {
		log.Error(err, "Couldn't get environment's configmap", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	// Check whether immutable fields are changed
	if oldEnv.Data.Provider != env.Data.Provider {
		return errors.New("provider can't be changed")
	}
	if oldEnv.Data.Location != env.Data.Location {
		return errors.New("location can't be changed")
	}
	vars, err := env.Data.buildVars()
	if err != nil {
		log.Error(err, "Couldn't build the environment's dotenv file", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	obj := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      env.Data.UUID,
			Namespace: env.UserID,
			Labels: map[string]string{
				"component": "bootstrap",
				"kind":      "environment",
			},
		},
		Data: map[string]string{
			"name":        env.Data.Name,
			"description": env.Data.Description,
			"vars":        vars,
		},
	}
	if err := kube.Update(ctx, controller.GetClient(), &obj); err != nil {
		log.Error(err, "Couldn't update the environment's configmap", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	return nil
}
// Delete removes the environment's ConfigMap from the user's namespace,
// authenticating with the user's own token.
func (env *Environemnt) Delete(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: env.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	controller, err := ctrl.NewManager(conf, ctrl.Options{})
	if err != nil {
		log.Error(err, "Couldn't init a controller")
		return consts.ErrSystemError
	}
	obj := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      env.Data.UUID,
			Namespace: env.UserID,
			Labels: map[string]string{
				"component": "bootstrap",
			},
		},
	}
	if err := kube.Delete(ctx, controller.GetClient(), &obj, false); err != nil {
		log.Error(err, "Couldn't remove environment's configmap", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	return nil
}
// List returns the user's environments, optionally filtered by a search
// string matched against the environment name and description.
func (env *Environemnt) List(ctx context.Context, searchString string) ([]*Environemnt, error) {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: env.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	clientset, err := kubernetes.NewForConfig(conf)
	if err != nil {
		log.Error(err, "Couldn't create a new clientset")
		return nil, consts.ErrSystemError
	}
	configmaps, err := clientset.CoreV1().ConfigMaps(env.UserID).List(ctx, metav1.ListOptions{LabelSelector: "kind=environment"})
	if err != nil {
		log.Error(err, "Couldn't list configmaps")
		return nil, consts.ErrSystemError
	}
	result := []*Environemnt{}
	for _, cm := range configmaps.Items {
		i := &Environemnt{
			Controller: env.Controller,
			UserID:     env.UserID,
			Token:      env.Token,
			Data: &EnvironemntData{
				UUID: cm.GetName(),
			},
		}
		if err := i.Get(ctx); err != nil {
			log.Error(err, "Couldn't get an environment", "environment_id", i.Data.UUID)
			return nil, consts.ErrSystemError
		}
		// BUG FIX: the original appended the environment twice when the
		// search string matched both the name and the description; combine
		// the checks so each environment is returned at most once.
		if len(searchString) == 0 ||
			strings.Contains(i.Data.Name, searchString) ||
			strings.Contains(i.Data.Description, searchString) {
			result = append(result, i)
		}
	}
	return result, nil
}
// Get loads the environment's ConfigMap from the user's namespace and
// populates env.Data from its "name"/"description" fields and the dotenv
// document stored under "vars". Missing entries reset the corresponding
// field to its zero value.
func (env *Environemnt) Get(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}
	conf := &rest.Config{
		Host:        "https://kubernetes.default.svc.cluster.local:443",
		BearerToken: env.Token,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: true,
		},
	}
	clientset, err := kubernetes.NewForConfig(conf)
	if err != nil {
		log.Error(err, "Couldn't create a new clientset")
		return consts.ErrSystemError
	}
	envData, err := clientset.CoreV1().ConfigMaps(env.UserID).Get(ctx, env.Data.UUID, metav1.GetOptions{})
	if err != nil {
		log.Error(err, "Couldn't get an environment's configmap", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	res, err := godotenv.Unmarshal(envData.Data["vars"])
	if err != nil {
		log.Error(err, "Couldn't parse environment's dotenv file from a configmap", "environment_id", env.Data.UUID)
		return consts.ErrSystemError
	}
	if val, ok := envData.Data["name"]; ok {
		env.Data.Name = val
	} else {
		env.Data.Name = ""
	}
	if val, ok := envData.Data["description"]; ok {
		env.Data.Description = val
	} else {
		env.Data.Description = ""
	}
	if val, ok := res["SP_PROVIDER"]; ok {
		env.Data.Provider = val
	} else {
		env.Data.Provider = ""
	}
	if val, ok := res["SP_KUBERNETES"]; ok {
		env.Data.Kubernetes = val
	} else {
		env.Data.Kubernetes = ""
	}
	if val, ok := res["SP_SERVER_TYPE"]; ok {
		env.Data.ServerType = val
	} else {
		env.Data.ServerType = ""
	}
	if val, ok := res["SP_SERVER_LOCATION"]; ok {
		env.Data.Location = val
	} else {
		env.Data.Location = ""
	}
	if val, ok := res["SP_DISK_SIZE"]; ok {
		intVal, err := strconv.Atoi(val)
		if err != nil {
			log.Error(err, "Couldn't parse disk size")
			intVal = 0
		}
		env.Data.DiskSize = intVal
	} else {
		// BUG FIX: the original reset Location here (copy-paste slip); the
		// missing-variable default belongs to DiskSize.
		env.Data.DiskSize = 0
	}
	return nil
}