A lot of thigs are going on
All checks were successful
ci/woodpecker/push/build Pipeline was successful
All checks were successful
ci/woodpecker/push/build Pipeline was successful
Signed-off-by: Nikolai Rodionov <allanger@badhouseplants.net>
This commit is contained in:
@@ -3,7 +3,6 @@ package v1
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/authorization"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/controllers"
|
||||
accounts "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/accounts/v1"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
@@ -16,27 +15,27 @@ import (
|
||||
|
||||
func NewAccountAuthRPCImpl(
|
||||
accountsCtrl *controllers.AccountController,
|
||||
authorizationCtrl *authorization.AuthController,
|
||||
) *AccountsAuthServer {
|
||||
return &AccountsAuthServer{
|
||||
authorizationCtrl *controllers.AuthController,
|
||||
) *AccountsServer {
|
||||
return &AccountsServer{
|
||||
accountsCtrl: accountsCtrl,
|
||||
authorizationCtrl: authorizationCtrl,
|
||||
}
|
||||
}
|
||||
|
||||
type AccountsAuthServer struct {
|
||||
accounts.UnimplementedAccountsAuthServiceServer
|
||||
type AccountsServer struct {
|
||||
accounts.UnimplementedAccountsServiceServer
|
||||
accountsCtrl *controllers.AccountController
|
||||
authorizationCtrl *authorization.AuthController
|
||||
authorizationCtrl *controllers.AuthController
|
||||
}
|
||||
|
||||
func (a *AccountsAuthServer) RefreshToken(ctx context.Context, in *empty.Empty) (*empty.Empty, error) {
|
||||
func (a *AccountsServer) RefreshToken(ctx context.Context, in *empty.Empty) (*empty.Empty, error) {
|
||||
claims, err := a.authorizationCtrl.ClaimsFromContext(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Context is invalid")
|
||||
}
|
||||
|
||||
if claims.TokenType != authorization.TokenTypeRefresh {
|
||||
if claims.TokenType != controllers.TokenTypeRefresh {
|
||||
return nil, status.Error(codes.Unauthenticated, "Invalid token")
|
||||
}
|
||||
|
||||
@@ -49,17 +48,17 @@ func (a *AccountsAuthServer) RefreshToken(ctx context.Context, in *empty.Empty)
|
||||
return nil, status.Error(codes.Unauthenticated, "Invalid session")
|
||||
}
|
||||
|
||||
accessToken, _, err := a.authorizationCtrl.GenerateToken(session.UserID, authorization.TokenTypeAccess)
|
||||
accessToken, _, err := a.authorizationCtrl.GenerateToken(session.UserID, controllers.TokenTypeAccess)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't generate an access token")
|
||||
}
|
||||
|
||||
refreshToken, tokenID, err := a.authorizationCtrl.GenerateToken(session.UserID, authorization.TokenTypeRefresh)
|
||||
refreshToken, tokenID, err := a.authorizationCtrl.GenerateToken(session.UserID, controllers.TokenTypeRefresh)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't generate an access token")
|
||||
}
|
||||
|
||||
newSession := &authorization.Session{UserID: session.UserID}
|
||||
newSession := &controllers.Session{UserID: session.UserID}
|
||||
|
||||
if err := a.authorizationCtrl.SaveSession(ctx, tokenID, newSession); err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't store session")
|
||||
@@ -3,7 +3,6 @@ package v1
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/authorization"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/controllers"
|
||||
accounts "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/accounts/v1"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
@@ -16,36 +15,36 @@ import (
|
||||
|
||||
func NewAccountNoAuthRPCImpl(
|
||||
accountsCtrl *controllers.AccountController,
|
||||
authorizationCtrl *authorization.AuthController,
|
||||
) *AccountsNoAuthServer {
|
||||
return &AccountsNoAuthServer{
|
||||
authorizationCtrl *controllers.AuthController,
|
||||
) *PublicAccountService {
|
||||
return &PublicAccountService{
|
||||
accountsCtrl: accountsCtrl,
|
||||
authorizationCtrl: authorizationCtrl,
|
||||
}
|
||||
}
|
||||
|
||||
type AccountsNoAuthServer struct {
|
||||
accounts.UnimplementedAccountsNoAuthServiceServer
|
||||
type PublicAccountService struct {
|
||||
accounts.UnimplementedPublicAccountsServiceServer
|
||||
accountsCtrl *controllers.AccountController
|
||||
authorizationCtrl *authorization.AuthController
|
||||
authorizationCtrl *controllers.AuthController
|
||||
}
|
||||
|
||||
func (a *AccountsNoAuthServer) SignIn(ctx context.Context, in *accounts.SignInRequest) (*empty.Empty, error) {
|
||||
func (a *PublicAccountService) SignIn(ctx context.Context, in *accounts.SignInRequest) (*empty.Empty, error) {
|
||||
id, err := a.accountsCtrl.Login(ctx, in.GetEmail(), in.GetPassword())
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't create a user")
|
||||
}
|
||||
accessToken, _, err := a.authorizationCtrl.GenerateToken(id, authorization.TokenTypeAccess)
|
||||
accessToken, _, err := a.authorizationCtrl.GenerateToken(id, controllers.TokenTypeAccess)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't generate an access token")
|
||||
}
|
||||
|
||||
refreshToken, tokenID, err := a.authorizationCtrl.GenerateToken(id, authorization.TokenTypeRefresh)
|
||||
refreshToken, tokenID, err := a.authorizationCtrl.GenerateToken(id, controllers.TokenTypeRefresh)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't generate an access token")
|
||||
}
|
||||
|
||||
session := &authorization.Session{UserID: id}
|
||||
session := &controllers.Session{UserID: id}
|
||||
|
||||
if err := a.authorizationCtrl.SaveSession(ctx, tokenID, session); err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't store session")
|
||||
@@ -61,7 +60,7 @@ func (a *AccountsNoAuthServer) SignIn(ctx context.Context, in *accounts.SignInRe
|
||||
}
|
||||
|
||||
// Create a new account in Softplayer
|
||||
func (a *AccountsNoAuthServer) SignUp(ctx context.Context, in *accounts.SignUpRequest) (*empty.Empty, error) {
|
||||
func (a *PublicAccountService) SignUp(ctx context.Context, in *accounts.SignUpRequest) (*empty.Empty, error) {
|
||||
data := &controllers.AccountData{
|
||||
Password: in.GetPassword(),
|
||||
Email: in.GetEmail(),
|
||||
@@ -71,17 +70,17 @@ func (a *AccountsNoAuthServer) SignUp(ctx context.Context, in *accounts.SignUpRe
|
||||
return nil, status.Error(codes.Aborted, "Couldn't create a user")
|
||||
}
|
||||
|
||||
accessToken, _, err := a.authorizationCtrl.GenerateToken(id, authorization.TokenTypeAccess)
|
||||
accessToken, _, err := a.authorizationCtrl.GenerateToken(id, controllers.TokenTypeAccess)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't generate an access token")
|
||||
}
|
||||
|
||||
refreshToken, tokenID, err := a.authorizationCtrl.GenerateToken(id, authorization.TokenTypeRefresh)
|
||||
refreshToken, tokenID, err := a.authorizationCtrl.GenerateToken(id, controllers.TokenTypeRefresh)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't generate an access token")
|
||||
}
|
||||
|
||||
session := &authorization.Session{UserID: id}
|
||||
session := &controllers.Session{UserID: id}
|
||||
|
||||
if err := a.authorizationCtrl.SaveSession(ctx, tokenID, session); err != nil {
|
||||
return nil, status.Error(codes.Aborted, "Couldn't store session")
|
||||
@@ -3,7 +3,7 @@ package v1
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/tools/logger"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/logger"
|
||||
test "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/test/v1"
|
||||
)
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package v1
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/tools/logger"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/logger"
|
||||
test "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/test/v1"
|
||||
)
|
||||
|
||||
|
||||
2
go.mod
2
go.mod
@@ -139,7 +139,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
gitea.badhouseplants.net/softplayer/softplayer-go-proto v0.0.0-20260509192230-bf7467c36f59
|
||||
gitea.badhouseplants.net/softplayer/softplayer-go-proto v0.0.0-20260510170341-d06c827862ad
|
||||
github.com/golang/protobuf v1.5.4
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
|
||||
4
go.sum
4
go.sum
@@ -6,8 +6,8 @@ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
gitea.badhouseplants.net/softplayer/softplayer-go-proto v0.0.0-20260509192230-bf7467c36f59 h1:pI25/wjcfvX62PcxyZ/i7XPTxdyCV9tV34JFSWQxYNw=
|
||||
gitea.badhouseplants.net/softplayer/softplayer-go-proto v0.0.0-20260509192230-bf7467c36f59/go.mod h1:AgOh1lkPHyRgBf3/s1btKcAqke/33LbKYarTD13qeAg=
|
||||
gitea.badhouseplants.net/softplayer/softplayer-go-proto v0.0.0-20260510170341-d06c827862ad h1:kJPBhJxrCR5ttn0AtAK9NgUSixyFAHOH0Kcjo8Y+ijU=
|
||||
gitea.badhouseplants.net/softplayer/softplayer-go-proto v0.0.0-20260510170341-d06c827862ad/go.mod h1:AgOh1lkPHyRgBf3/s1btKcAqke/33LbKYarTD13qeAg=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
package consts
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
USERNAME_LABEL_KEY = "username"
|
||||
EMAIL_VERIFIED_LABEL_KEY = "email-verified"
|
||||
EMAIL_VERIFIED_LABEL_TRUE = "true"
|
||||
EMAIL_VERIFIED_LABEL_FALSE = "false"
|
||||
SOFTPLAYER_ACCOUNTS_NAMESPACE = "softplayer-accounts"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSystemError = status.Error(codes.Internal, "a system error occured, we will try to fix it as soon as possible")
|
||||
)
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/hash"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/tools/logger"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/logger"
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
"github.com/google/uuid"
|
||||
"github.com/redis/go-redis/v9"
|
||||
|
||||
@@ -1,420 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
b64 "encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/consts"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/helm"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/kube"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/types/helmrelease"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
"gopkg.in/yaml.v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
Controller ctrl.Manager
|
||||
UserID string
|
||||
Data *ApplicationData
|
||||
Token string
|
||||
}
|
||||
|
||||
type ApplicationData struct {
|
||||
UUID string
|
||||
Name string
|
||||
Description string
|
||||
Application string
|
||||
Version string
|
||||
Environemnt string
|
||||
Config map[string]string
|
||||
RawConfig string
|
||||
}
|
||||
|
||||
// Create environment should create a new configmap in the user's namespace
|
||||
// using a token that belongs to the user.
|
||||
func (app *Application) Create(ctx context.Context) error {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
app.Data.UUID = uuid.New().String()
|
||||
|
||||
app.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: app.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
|
||||
controller, err := ctrl.NewManager(conf, ctrl.Options{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
helmEntry := helm.NewHelm()
|
||||
// TODO: It should be possible to use other repos
|
||||
release := &helm.ReleaseData{
|
||||
Name: app.Data.Name,
|
||||
Chart: app.Data.Application,
|
||||
Version: app.Data.Version,
|
||||
RepositoryURL: "oci://registry.badhouseplants.net/softplayer/helm",
|
||||
RepositoryKind: "oci",
|
||||
RepositoryName: "softplayer",
|
||||
}
|
||||
formattedName := strings.ToLower(
|
||||
b64.StdEncoding.EncodeToString(
|
||||
[]byte(app.Data.Application + app.Data.Name + app.Data.Name + app.Data.Environemnt),
|
||||
),
|
||||
)[0:20]
|
||||
|
||||
goPath := os.TempDir() + "/softplayer/" + formattedName
|
||||
if err := os.MkdirAll(goPath, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := helmEntry.PullChart(goPath, release)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't pull the chart")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
prettyCfgSupport := true
|
||||
cfgSchema := map[string]*helmrelease.PrettyConfigSchema{}
|
||||
cfgSchemaPath, err := os.ReadFile(fmt.Sprintf("%s/%s/config.yaml", goPath, path))
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't find the config file")
|
||||
prettyCfgSupport = false
|
||||
} else {
|
||||
err = yaml.Unmarshal(cfgSchemaPath, cfgSchema)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't parse the pretty config")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cfg := &helmrelease.HelmRelease{
|
||||
Helm: helmrelease.Helm{
|
||||
Release: app.Data.Name,
|
||||
Chart: helmrelease.Chart{
|
||||
Name: app.Data.Application,
|
||||
Version: app.Data.Version,
|
||||
},
|
||||
Repo: helmrelease.Repo{
|
||||
URL: release.RepositoryURL,
|
||||
Type: release.RepositoryKind,
|
||||
},
|
||||
},
|
||||
Config: helmrelease.Config{},
|
||||
}
|
||||
|
||||
if len(app.Data.Config) > 0 && prettyCfgSupport {
|
||||
for key, val := range app.Data.Config {
|
||||
value, ok := cfgSchema[key]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsuported config entry: %s", key)
|
||||
}
|
||||
tmpl, err := template.New("prettyconfig").Parse(val)
|
||||
if err != nil {
|
||||
log.Error(err, "Coudln't build a tempalte for prettyconfig")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
var tmplRes bytes.Buffer
|
||||
if err := tmpl.Execute(&tmplRes, app.Data); err != nil {
|
||||
log.Error(err, "Couldn't execute the prettyconfig template")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
cfg.Config.Pretty = append(cfg.Config.Pretty, helmrelease.PrettyConfig{
|
||||
Key: key,
|
||||
Path: value.Path,
|
||||
Value: tmplRes.String(),
|
||||
})
|
||||
}
|
||||
} else if len(app.Data.RawConfig) > 0 {
|
||||
cfg.Config.Raw = app.Data.RawConfig
|
||||
}
|
||||
|
||||
cfgYaml, err := yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't marshall a pretty config into a struct")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
appSecret := corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: formattedName[0:20],
|
||||
Namespace: app.UserID,
|
||||
Labels: map[string]string{
|
||||
"component": "install",
|
||||
"kind": "action",
|
||||
"environment": app.Data.Environemnt,
|
||||
"uuid": app.Data.UUID,
|
||||
},
|
||||
},
|
||||
StringData: map[string]string{
|
||||
"values.yaml": string(cfgYaml),
|
||||
},
|
||||
}
|
||||
|
||||
if err := kube.Create(ctx, controller.GetClient(), &appSecret, false); err != nil {
|
||||
log.Error(err, "Couldn't create a configmap")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *Application) Delete(ctx context.Context) error {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
app.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: app.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't create a new clientset")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
configmaps, err := clientset.CoreV1().ConfigMaps(app.UserID).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("uuid=%s", app.Data.UUID)})
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't list configmaps")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
for _, cm := range configmaps.Items {
|
||||
if err := clientset.CoreV1().ConfigMaps(app.UserID).Delete(ctx, cm.GetName(), *metav1.NewDeleteOptions(100)); err != nil {
|
||||
log.Error(err, "Couldn't remove configmap", "name", cm.GetName(), "namespace", cm.GetNamespace())
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// func (env *Environemnt) Update(ctx context.Context) error {
|
||||
// if err := env.isNsVerified(ctx); err != nil {
|
||||
// log.Println("Can't verify ns")
|
||||
// return err
|
||||
// }
|
||||
|
||||
// env.Controller.GetClient()
|
||||
// conf := &rest.Config{
|
||||
// Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
// BearerToken: env.Token,
|
||||
// TLSClientConfig: rest.TLSClientConfig{
|
||||
// Insecure: true,
|
||||
// },
|
||||
// }
|
||||
|
||||
// controller, err := ctrl.NewManager(conf, ctrl.Options{})
|
||||
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// oldEnv := &Environemnt{
|
||||
// Controller: env.Controller,
|
||||
// UserID: env.UserID,
|
||||
// Token: env.Token,
|
||||
// Data: &ApplicationData{
|
||||
// UUID: env.Data.UUID,
|
||||
// },
|
||||
// }
|
||||
|
||||
// if err := oldEnv.Get(ctx); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// // Check whter immutable fields are changed
|
||||
|
||||
// if oldEnv.Data.Provider != env.Data.Provider {
|
||||
// return errors.New("provider can't be changed")
|
||||
// }
|
||||
// if oldEnv.Data.Location != env.Data.Location {
|
||||
// return errors.New("location can't be changed")
|
||||
// }
|
||||
|
||||
// vars, err := env.Data.buildVars()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// obj := corev1.ConfigMap{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: env.Data.UUID,
|
||||
// Namespace: env.UserID,
|
||||
// Labels: map[string]string{
|
||||
// "component": "bootstrap",
|
||||
// "kind": "environment",
|
||||
// },
|
||||
// },
|
||||
// Data: map[string]string{
|
||||
// "name": env.Data.Name,
|
||||
// "description": env.Data.Description,
|
||||
// "vars": vars,
|
||||
// },
|
||||
// }
|
||||
|
||||
// if err := kube.Update(ctx, controller.GetClient(), &obj); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// return nil
|
||||
// }
|
||||
// func (*Environemnt) Delete(ctx context.Context) error {
|
||||
// env.Controller.GetClient()
|
||||
// conf := &rest.Config{
|
||||
// Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
// BearerToken: env.Token,
|
||||
// TLSClientConfig: rest.TLSClientConfig{
|
||||
// Insecure: true,
|
||||
// },
|
||||
// }
|
||||
|
||||
// controller, err := ctrl.NewManager(conf, ctrl.Options{})
|
||||
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// obj := corev1.ConfigMap{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: env.Data.UUID,
|
||||
// Namespace: env.UserID,
|
||||
// Labels: map[string]string{
|
||||
// "component": "bootstrap",
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
// if err := kube.Delete(ctx, controller.GetClient(), &obj, false); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (env *Environemnt) ListEnvs(ctx context.Context) ([]*Environemnt, error) {
|
||||
// env.Controller.GetClient()
|
||||
// conf := &rest.Config{
|
||||
// Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
// BearerToken: env.Token,
|
||||
// TLSClientConfig: rest.TLSClientConfig{
|
||||
// Insecure: true,
|
||||
// },
|
||||
// }
|
||||
// clientset, err := kubernetes.NewForConfig(conf)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// configmaps, err := clientset.CoreV1().ConfigMaps(env.UserID).List(ctx, metav1.ListOptions{LabelSelector: "kind=environment"})
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// result := []*Environemnt{}
|
||||
// for _, cm := range configmaps.Items {
|
||||
// i := &Environemnt{}
|
||||
// data := &ApplicationData{
|
||||
// UUID: cm.GetName(),
|
||||
// }
|
||||
// i.Token = env.Token
|
||||
// i.UserID = env.UserID
|
||||
// i.Data = data
|
||||
// i.Controller = env.Controller
|
||||
// if err := i.Get(ctx); err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// result = append(result, i)
|
||||
// }
|
||||
// return result, nil
|
||||
// }
|
||||
|
||||
// func (env *Environemnt) Get(ctx context.Context) error {
|
||||
// env.Controller.GetClient()
|
||||
// conf := &rest.Config{
|
||||
// Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
// BearerToken: env.Token,
|
||||
// TLSClientConfig: rest.TLSClientConfig{
|
||||
// Insecure: true,
|
||||
// },
|
||||
// }
|
||||
// clientset, err := kubernetes.NewForConfig(conf)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// envData, err := clientset.CoreV1().ConfigMaps(env.UserID).Get(ctx, env.Data.UUID, metav1.GetOptions{})
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// res, err := godotenv.Unmarshal(envData.Data["vars"])
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// if val, ok := envData.Data["name"]; ok {
|
||||
// env.Data.Name = val
|
||||
// } else {
|
||||
// env.Data.Name = ""
|
||||
// }
|
||||
|
||||
// if val, ok := envData.Data["description"]; ok {
|
||||
// env.Data.Description = val
|
||||
// } else {
|
||||
// env.Data.Description = ""
|
||||
// }
|
||||
|
||||
// if val, ok := res["SP_PROVIDER"]; ok {
|
||||
// env.Data.Provider = val
|
||||
// } else {
|
||||
// env.Data.Provider = ""
|
||||
// }
|
||||
// if val, ok := res["SP_KUBERNETES"]; ok {
|
||||
// env.Data.Kubernetes = val
|
||||
// } else {
|
||||
// env.Data.Kubernetes = ""
|
||||
// }
|
||||
// if val, ok := res["SP_SERVER_TYPE"]; ok {
|
||||
// env.Data.ServerType = val
|
||||
// } else {
|
||||
// env.Data.ServerType = ""
|
||||
// }
|
||||
// if val, ok := res["SP_SERVER_LOCATION"]; ok {
|
||||
// env.Data.Location = val
|
||||
// } else {
|
||||
// env.Data.Location = ""
|
||||
// }
|
||||
|
||||
// return nil
|
||||
// }
|
||||
@@ -1,4 +1,4 @@
|
||||
package authorization
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,10 +1,10 @@
|
||||
package authorization_test
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/authorization"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/controllers"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@@ -16,16 +16,16 @@ var (
|
||||
)
|
||||
|
||||
func TestGenerateInvalidTokenType(t *testing.T) {
|
||||
authCtrl := authorization.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
|
||||
authCtrl := controllers.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
|
||||
token, _, err := authCtrl.GenerateToken(testUserID, "invalid_type")
|
||||
assert.Equal(t, "", token)
|
||||
assert.ErrorIs(t, authorization.ErrUnknownTokenType, err)
|
||||
assert.ErrorIs(t, controllers.ErrUnknownTokenType, err)
|
||||
}
|
||||
|
||||
func TestGenerateValidateAccessToken(t *testing.T) {
|
||||
authCtrl := authorization.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
|
||||
authCtrl := controllers.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
|
||||
now := time.Now()
|
||||
token, _, err := authCtrl.GenerateToken(testUserID, authorization.TokenTypeAccess)
|
||||
token, _, err := authCtrl.GenerateToken(testUserID, controllers.TokenTypeAccess)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, token)
|
||||
|
||||
@@ -33,16 +33,16 @@ func TestGenerateValidateAccessToken(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testUserID, claims.UserID)
|
||||
assert.NotEmpty(t, claims.TokenID)
|
||||
assert.Equal(t, authorization.TokenTypeAccess, claims.TokenType)
|
||||
assert.Equal(t, controllers.TokenTypeAccess, claims.TokenType)
|
||||
assert.Equal(t, now.Add(testAccessTTL).Unix(), claims.ExpiresAt.Unix())
|
||||
assert.Equal(t, now.Unix(), claims.IssuedAt.Unix())
|
||||
assert.Equal(t, now.Unix(), claims.NotBefore.Unix())
|
||||
}
|
||||
|
||||
func TestGenerateValidateRefreshToken(t *testing.T) {
|
||||
authCtrl := authorization.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
|
||||
authCtrl := controllers.NewAuthController([]byte("test"), testAccessTTL, testRefreshTTL, nil)
|
||||
now := time.Now()
|
||||
token, _, err := authCtrl.GenerateToken(testUserID, authorization.TokenTypeRefresh)
|
||||
token, _, err := authCtrl.GenerateToken(testUserID, controllers.TokenTypeRefresh)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, token)
|
||||
|
||||
@@ -50,7 +50,7 @@ func TestGenerateValidateRefreshToken(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testUserID, claims.UserID)
|
||||
assert.NotEmpty(t, claims.TokenID)
|
||||
assert.Equal(t, authorization.TokenTypeRefresh, claims.TokenType)
|
||||
assert.Equal(t, controllers.TokenTypeRefresh, claims.TokenType)
|
||||
assert.Equal(t, now.Add(testRefreshTTL).Unix(), claims.ExpiresAt.Unix())
|
||||
assert.Equal(t, now.Unix(), claims.IssuedAt.Unix())
|
||||
assert.Equal(t, now.Unix(), claims.NotBefore.Unix())
|
||||
@@ -1,127 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/email"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/kube"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
type EmailSvc struct {
|
||||
Controller ctrl.Manager
|
||||
Data EmailData
|
||||
EmailConfig email.EmailConf
|
||||
DevMode bool
|
||||
}
|
||||
|
||||
type EmailData struct {
|
||||
UserID string
|
||||
Code string
|
||||
}
|
||||
|
||||
func (svc *EmailSvc) SendVerification(ctx context.Context) error {
|
||||
client := svc.Controller.GetClient()
|
||||
userns := &corev1.Namespace{}
|
||||
if err := client.Get(ctx, types.NamespacedName{
|
||||
Name: svc.Data.UserID,
|
||||
}, userns); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userName, ok := userns.Labels["username"]
|
||||
if !ok {
|
||||
return errors.New("user not found")
|
||||
}
|
||||
accountData := &corev1.Secret{}
|
||||
if err := client.Get(ctx, types.NamespacedName{
|
||||
Namespace: "softplayer-accounts",
|
||||
Name: userName,
|
||||
}, accountData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if val, ok := userns.Labels["email-verified"]; ok && val == "true" {
|
||||
return errors.New("email is already verified")
|
||||
}
|
||||
|
||||
number := encodeToString(6)
|
||||
svc.Data.Code = number
|
||||
if !svc.DevMode {
|
||||
emailContent := "Subject: Softplayer verification code\r\n" + "\r\n" + fmt.Sprintf("Your verification code is %s", number)
|
||||
email := string(accountData.Data["email"])
|
||||
if err := svc.EmailConfig.SendEmail(email, emailContent); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
emailCode := corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "email-verification-code",
|
||||
Namespace: svc.Data.UserID,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"code": number,
|
||||
},
|
||||
}
|
||||
|
||||
if err := kube.Create(ctx, client, &emailCode, true); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svc *EmailSvc) ConfirmVerification(ctx context.Context) error {
|
||||
client := svc.Controller.GetClient()
|
||||
emailCode := &corev1.ConfigMap{}
|
||||
if err := client.Get(ctx, types.NamespacedName{
|
||||
Namespace: svc.Data.UserID,
|
||||
Name: "email-verification-code",
|
||||
}, emailCode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if svc.Data.Code != emailCode.Data["code"] {
|
||||
log.Println(svc.Data.Code)
|
||||
log.Println(emailCode.Data["code"])
|
||||
return errors.New("wrong verification code")
|
||||
}
|
||||
if err := client.Delete(ctx, emailCode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userns := &corev1.Namespace{}
|
||||
if err := client.Get(ctx, types.NamespacedName{
|
||||
Name: svc.Data.UserID,
|
||||
}, userns); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userns.Labels["email-verified"] = "true"
|
||||
if err := client.Update(ctx, userns); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func encodeToString(max int) string {
|
||||
b := make([]byte, max)
|
||||
n, err := io.ReadAtLeast(rand.Reader, b, max)
|
||||
if n != max {
|
||||
panic(err)
|
||||
}
|
||||
for i := 0; i < len(b); i++ {
|
||||
b[i] = table[int(b[i])%len(table)]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
var table = [...]byte{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0'}
|
||||
@@ -1,413 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/google/uuid"
|
||||
"github.com/joho/godotenv"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/consts"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/kube"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
// Environemnt carries everything needed to manage a user's environment
// through the Kubernetes API: a controller manager, a rest config, the
// owning user's ID, the environment payload, and the user's bearer token.
// NOTE(review): the type name is misspelled ("Environemnt") — kept as-is
// because callers reference it.
type Environemnt struct {
	Controller ctrl.Manager
	Config     *rest.Config
	UserID     string
	Data       *EnvironemntData
	Token      string
}

// EnvironemntData is the user-supplied description of an environment.
type EnvironemntData struct {
	UUID        string // generated server-side by Create
	Name        string
	Description string
	Provider    string
	Kubernetes  string
	Location    string
	ServerType  string
	DiskSize    int
}
|
||||
|
||||
func (e *EnvironemntData) buildVars() (string, error) {
|
||||
// Please make sure that the same variables are used by ansible
|
||||
vars := fmt.Sprintf(`# -- Generated by the softplayer controller
|
||||
SP_PROVIDER=%s
|
||||
SP_KUBERNETES=%s
|
||||
SP_SERVER_TYPE=%s
|
||||
SP_SERVER_LOCATION=%s
|
||||
SP_DISK_SIZE=%d`,
|
||||
e.Provider,
|
||||
e.Kubernetes,
|
||||
e.ServerType,
|
||||
e.Location,
|
||||
e.DiskSize,
|
||||
)
|
||||
|
||||
return vars, nil
|
||||
}
|
||||
|
||||
// Check whether used has passed the email verification
|
||||
// isNsVerified checks that the user behind env.UserID has completed
// email verification, by inspecting the "email-verified" label on the
// user's namespace. It returns nil only when the label exists and is
// not "false".
func (env *Environemnt) isNsVerified(ctx context.Context) error {
	log, err := logr.FromContext(ctx)
	if err != nil {
		// No logger in the context: fall back to a development zap logger.
		zapLog, err := zap.NewDevelopment()
		if err != nil {
			panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
		}
		log = zapr.NewLogger(zapLog)
	}

	clientset, err := kubernetes.NewForConfig(env.Config)
	if err != nil {
		log.Error(err, "Couldn't create a new clientset")
		return consts.ErrSystemError
	}
	// The user's namespace is named after the user ID.
	ns, err := clientset.CoreV1().Namespaces().Get(ctx, env.UserID, metav1.GetOptions{})
	if err != nil {
		log.Error(err, "Couldn't get a user's namespace")
		if k8serrors.IsNotFound(err) {
			err := errors.New("user not found by ID")
			return status.Error(codes.NotFound, err.Error())
		}
		return consts.ErrSystemError
	}

	val, ok := ns.GetLabels()["email-verified"]
	if !ok || val == "false" {
		return errors.New("user email is not verified, can't create an new env")
	}

	return nil
}
|
||||
|
||||
// Create environment should create a new configmap in the user's namespace
|
||||
// using a token that belongs to the user.
|
||||
func (env *Environemnt) Create(ctx context.Context) error {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
if err := env.isNsVerified(ctx); err != nil {
|
||||
return status.Error(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
// Prepare a new ID for a enironment
|
||||
env.Data.UUID = uuid.New().String()
|
||||
|
||||
env.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: env.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
|
||||
controller, err := ctrl.NewManager(conf, ctrl.Options{})
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't init a controller")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
vars, err := env.Data.buildVars()
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't build the environment's dotenv file", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
obj := corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: env.Data.UUID,
|
||||
Namespace: env.UserID,
|
||||
Labels: map[string]string{
|
||||
"component": "bootstrap",
|
||||
"kind": "environment",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"name": env.Data.Name,
|
||||
"description": env.Data.Description,
|
||||
"vars": vars,
|
||||
},
|
||||
}
|
||||
if err := kube.Create(ctx, controller.GetClient(), &obj, false); err != nil {
|
||||
log.Error(err, "Couln't create the environment's configmap", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Environemnt) Update(ctx context.Context) error {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
env.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: env.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
|
||||
controller, err := ctrl.NewManager(conf, ctrl.Options{})
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't init a controller")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
oldEnv := &Environemnt{
|
||||
Controller: env.Controller,
|
||||
UserID: env.UserID,
|
||||
Token: env.Token,
|
||||
Data: &EnvironemntData{
|
||||
UUID: env.Data.UUID,
|
||||
},
|
||||
}
|
||||
|
||||
if err := oldEnv.Get(ctx); err != nil {
|
||||
log.Error(err, "Couldn't get environment's configmap", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
// Check whter immutable fields are changed
|
||||
|
||||
if oldEnv.Data.Provider != env.Data.Provider {
|
||||
return errors.New("provider can't be changed")
|
||||
}
|
||||
if oldEnv.Data.Location != env.Data.Location {
|
||||
return errors.New("location can't be changed")
|
||||
}
|
||||
|
||||
vars, err := env.Data.buildVars()
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't build the environment's dotenv file", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
obj := corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: env.Data.UUID,
|
||||
Namespace: env.UserID,
|
||||
Labels: map[string]string{
|
||||
"component": "bootstrap",
|
||||
"kind": "environment",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"name": env.Data.Name,
|
||||
"description": env.Data.Description,
|
||||
"vars": vars,
|
||||
},
|
||||
}
|
||||
|
||||
if err := kube.Update(ctx, controller.GetClient(), &obj); err != nil {
|
||||
log.Error(err, "Couln't update the environment's configmap", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (env *Environemnt) Delete(ctx context.Context) error {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
env.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: env.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
|
||||
controller, err := ctrl.NewManager(conf, ctrl.Options{})
|
||||
if err != nil {
|
||||
log.Error(err, "couldn't init a controller")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
obj := corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: env.Data.UUID,
|
||||
Namespace: env.UserID,
|
||||
Labels: map[string]string{
|
||||
"component": "bootstrap",
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := kube.Delete(ctx, controller.GetClient(), &obj, false); err != nil {
|
||||
log.Error(err, "Couln't remove environment's configmap", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Environemnt) List(ctx context.Context, searchString string) ([]*Environemnt, error) {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
env.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: env.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't create a new clientset")
|
||||
return nil, consts.ErrSystemError
|
||||
}
|
||||
configmaps, err := clientset.CoreV1().ConfigMaps(env.UserID).List(ctx, metav1.ListOptions{LabelSelector: "kind=environment"})
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't list configmaps")
|
||||
return nil, consts.ErrSystemError
|
||||
}
|
||||
|
||||
result := []*Environemnt{}
|
||||
for _, cm := range configmaps.Items {
|
||||
i := &Environemnt{}
|
||||
data := &EnvironemntData{
|
||||
UUID: cm.GetName(),
|
||||
}
|
||||
i.Token = env.Token
|
||||
i.UserID = env.UserID
|
||||
i.Data = data
|
||||
i.Controller = env.Controller
|
||||
if err := i.Get(ctx); err != nil {
|
||||
log.Error(err, "Couldn't get an environment", "environment_id", i.Data.UUID)
|
||||
return nil, consts.ErrSystemError
|
||||
}
|
||||
if len(searchString) > 0 {
|
||||
if strings.Contains(i.Data.Name, searchString) {
|
||||
result = append(result, i)
|
||||
}
|
||||
if strings.Contains(i.Data.Description, searchString) {
|
||||
result = append(result, i)
|
||||
}
|
||||
} else {
|
||||
result = append(result, i)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (env *Environemnt) Get(ctx context.Context) error {
|
||||
log, err := logr.FromContext(ctx)
|
||||
if err != nil {
|
||||
zapLog, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
||||
}
|
||||
log = zapr.NewLogger(zapLog)
|
||||
}
|
||||
|
||||
env.Controller.GetClient()
|
||||
conf := &rest.Config{
|
||||
Host: "https://kubernetes.default.svc.cluster.local:443",
|
||||
BearerToken: env.Token,
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: true,
|
||||
},
|
||||
}
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't create a new clientset")
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
envData, err := clientset.CoreV1().ConfigMaps(env.UserID).Get(ctx, env.Data.UUID, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't get an environment's configmap", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
res, err := godotenv.Unmarshal(envData.Data["vars"])
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't parse environment's dotenv file from a configmap", "environment_id", env.Data.UUID)
|
||||
return consts.ErrSystemError
|
||||
}
|
||||
|
||||
if val, ok := envData.Data["name"]; ok {
|
||||
env.Data.Name = val
|
||||
} else {
|
||||
env.Data.Name = ""
|
||||
}
|
||||
|
||||
if val, ok := envData.Data["description"]; ok {
|
||||
env.Data.Description = val
|
||||
} else {
|
||||
env.Data.Description = ""
|
||||
}
|
||||
|
||||
if val, ok := res["SP_PROVIDER"]; ok {
|
||||
env.Data.Provider = val
|
||||
} else {
|
||||
env.Data.Provider = ""
|
||||
}
|
||||
if val, ok := res["SP_KUBERNETES"]; ok {
|
||||
env.Data.Kubernetes = val
|
||||
} else {
|
||||
env.Data.Kubernetes = ""
|
||||
}
|
||||
if val, ok := res["SP_SERVER_TYPE"]; ok {
|
||||
env.Data.ServerType = val
|
||||
} else {
|
||||
env.Data.ServerType = ""
|
||||
}
|
||||
if val, ok := res["SP_SERVER_LOCATION"]; ok {
|
||||
env.Data.Location = val
|
||||
} else {
|
||||
env.Data.Location = ""
|
||||
}
|
||||
if val, ok := res["SP_DISK_SIZE"]; ok {
|
||||
intVal, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
log.Error(err, "Couldn't parse disk size")
|
||||
intVal = 0
|
||||
}
|
||||
env.Data.DiskSize = intVal
|
||||
} else {
|
||||
env.Data.Location = ""
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
package email
|
||||
|
||||
import (
|
||||
"net/smtp"
|
||||
)
|
||||
|
||||
// EmailConf holds the SMTP settings used to send emails.
type EmailConf struct {
	From     string // sender address; also used as the PLAIN auth identity
	Password string // SMTP password for From
	SmtpHost string
	SmtpPort string
}
|
||||
|
||||
func (e *EmailConf) SendEmail(to string, message string) error {
|
||||
messageByte := []byte(message)
|
||||
auth := smtp.PlainAuth("", e.From, e.Password, e.SmtpHost)
|
||||
|
||||
if err := smtp.SendMail(e.SmtpHost+":"+e.SmtpPort, auth, e.From, []string{to}, messageByte); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,178 +0,0 @@
|
||||
package helm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v2"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"helm.sh/helm/v3/pkg/chart/loader"
|
||||
"helm.sh/helm/v3/pkg/chartutil"
|
||||
"helm.sh/helm/v3/pkg/cli"
|
||||
"helm.sh/helm/v3/pkg/engine"
|
||||
"helm.sh/helm/v3/pkg/getter"
|
||||
"helm.sh/helm/v3/pkg/registry"
|
||||
"helm.sh/helm/v3/pkg/repo"
|
||||
)
|
||||
|
||||
// Helm is the Helmhelper implementation backed by the Helm v3 SDK.
type Helm struct{}

// NewHelm returns a new Helm-backed Helmhelper.
func NewHelm() Helmhelper {
	return &Helm{}
}
|
||||
|
||||
// getDownloadDirPath returns the chart cache directory (".charts")
// under the given working directory.
func getDownloadDirPath(workdirPath string) string {
	return fmt.Sprintf("%s/.charts", workdirPath)
}
|
||||
|
||||
func getChartDirPath(downloadDirPath string, release *ReleaseData) string {
|
||||
return fmt.Sprintf("%s/%s-%s-%s", downloadDirPath, release.RepositoryName, release.Chart, release.Version)
|
||||
|
||||
}
|
||||
|
||||
func (h *Helm) PullChart(workdirPath string, release *ReleaseData) (path string, err error) {
|
||||
downloadDirPath := getDownloadDirPath(workdirPath)
|
||||
if err := os.MkdirAll(downloadDirPath, 0777); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
config := new(action.Configuration)
|
||||
cl := cli.New()
|
||||
chartDir := getChartDirPath(downloadDirPath, release)
|
||||
_, err = os.Stat(chartDir)
|
||||
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return "", nil
|
||||
} else if os.IsNotExist(err) {
|
||||
if err := os.Mkdir(chartDir, 0777); err != nil {
|
||||
return "", err
|
||||
}
|
||||
registry, err := registry.NewClient()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var path string
|
||||
// Download the chart to the workdir
|
||||
if release.RepositoryKind != "oci" {
|
||||
r, err := repo.NewChartRepository(&repo.Entry{
|
||||
Name: release.RepositoryName,
|
||||
URL: release.RepositoryURL,
|
||||
}, getter.All(cl))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
path = r.Config.Name
|
||||
|
||||
} else {
|
||||
path = release.RepositoryURL
|
||||
}
|
||||
|
||||
client := action.NewPullWithOpts(action.WithConfig(config))
|
||||
|
||||
client.Untar = true
|
||||
client.UntarDir = workdirPath
|
||||
client.SetRegistryClient(registry)
|
||||
client.DestDir = workdirPath
|
||||
client.Settings = cl
|
||||
|
||||
chartRemote := fmt.Sprintf("%s/%s", path, release.Chart)
|
||||
logrus.Infof("trying to pull: %s", chartRemote)
|
||||
if _, err = client.Run(chartRemote); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return release.Chart, nil
|
||||
}
|
||||
|
||||
func (h *Helm) FindLatestVersion(workdirPath string, release *ReleaseData) (version string, err error) {
|
||||
downloadDirPath := getDownloadDirPath(workdirPath)
|
||||
if err := os.MkdirAll(downloadDirPath, 0777); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
config := new(action.Configuration)
|
||||
cl := cli.New()
|
||||
chartDir := getChartDirPath(downloadDirPath, release)
|
||||
chartPath, err := h.PullChart(workdirPath, release)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
showAction := action.NewShowWithConfig(action.ShowChart, config)
|
||||
|
||||
res, err := showAction.LocateChart(fmt.Sprintf("%s/%s", chartDir, chartPath), cl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
res, err = showAction.Run(res)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
chartData, err := chartFromString(res)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
logrus.Infof("the latest version of %s is %s", release.Chart, chartData.Version)
|
||||
versionedChartDir := getChartDirPath(downloadDirPath, release)
|
||||
os.Rename(chartDir, versionedChartDir)
|
||||
return chartData.Version, err
|
||||
}
|
||||
|
||||
func (h *Helm) RenderChart(workdirPath string, release *ReleaseData) error {
|
||||
downloadDirPath := getDownloadDirPath(workdirPath)
|
||||
chartDirPath := getChartDirPath(downloadDirPath, release)
|
||||
chartPath, err := getChartPathFromDir(chartDirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Info(fmt.Sprintf("%s/%s", chartDirPath, chartPath))
|
||||
chartObj, err := loader.Load(fmt.Sprintf("%s/%s", chartDirPath, chartPath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
values := chartutil.Values{}
|
||||
values["Values"] = chartObj.Values
|
||||
values["Release"] = map[string]string{
|
||||
"Name": release.Name,
|
||||
"Namespace": release.Namespace,
|
||||
}
|
||||
values["Capabilities"] = map[string]map[string]string{
|
||||
"KubeVersion": {
|
||||
"Version": "v1.27.9",
|
||||
"GitVersion": "v1.27.9",
|
||||
},
|
||||
}
|
||||
files, err := engine.Engine{Strict: false}.Render(chartObj, values)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Info(files)
|
||||
for file, data := range files {
|
||||
logrus.Infof("%s - %s", file, data)
|
||||
}
|
||||
logrus.Info("I'm here")
|
||||
return nil
|
||||
}
|
||||
|
||||
// getChartPathFromDir returns the name of the single entry expected in
// downloadDir, failing when the directory is unreadable, empty, or has
// more than one entry.
func getChartPathFromDir(downloadDir string) (file string, err error) {
	files, err := os.ReadDir(downloadDir)
	switch {
	case err != nil:
		return "", err
	case len(files) == 0:
		return "", fmt.Errorf("expected to have one file, got zero in a dir %s", downloadDir)
	case len(files) > 1:
		return "", fmt.Errorf("expected to have only one file in a dir %s", downloadDir)
	}
	return files[0].Name(), nil
}
|
||||
|
||||
func chartFromString(info string) (*ReleaseData, error) {
|
||||
releaseData := new(ReleaseData)
|
||||
if err := yaml.Unmarshal([]byte(info), &releaseData); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return releaseData, nil
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
package helm
|
||||
|
||||
// Helmhelper is the chart-management surface used by the controller:
// resolving the newest chart version, downloading charts, and rendering
// their templates.
type Helmhelper interface {
	FindLatestVersion(workdirPath string, release *ReleaseData) (string, error)
	PullChart(workdirPath string, release *ReleaseData) (string, error)
	RenderChart(workdirPath string, release *ReleaseData) error
}

// ReleaseData describes a Helm release: the chart coordinates and the
// repository it comes from.
type ReleaseData struct {
	Name           string // release name
	Chart          string
	Namespace      string
	Version        string
	RepositoryName string
	RepositoryURL  string
	RepositoryKind string // "oci" selects the OCI pull path; TODO confirm other values
	ValuesData     string
}
|
||||
@@ -1,68 +0,0 @@
|
||||
package kube
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func Create(ctx context.Context, client client.Client, obj client.Object, wait bool) error {
|
||||
if err := client.Create(ctx, obj); err != nil {
|
||||
return err
|
||||
}
|
||||
if wait {
|
||||
if err := WaitUntilCreated(ctx, client, obj, 10, time.Millisecond*50); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Update(ctx context.Context, client client.Client, obj client.Object) error {
|
||||
if err := client.Update(ctx, obj); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetOwnerRef(ctx context.Context, client client.Client, obj client.Object, owner client.Object) client.Object {
|
||||
apiVersion := fmt.Sprintf("%s/%s", owner.GetObjectKind().GroupVersionKind().Group, owner.GetObjectKind().GroupVersionKind().Version)
|
||||
ownerReference := []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: apiVersion,
|
||||
Kind: owner.GetObjectKind().GroupVersionKind().GroupKind().Kind,
|
||||
Name: owner.GetName(),
|
||||
UID: owner.GetUID(),
|
||||
},
|
||||
}
|
||||
obj.SetOwnerReferences(ownerReference)
|
||||
return obj
|
||||
}
|
||||
|
||||
func WaitUntilCreated(ctx context.Context, client client.Client, obj client.Object, attemps int, timeout time.Duration) error {
|
||||
if err := client.Get(ctx, types.NamespacedName{
|
||||
Namespace: obj.GetNamespace(),
|
||||
Name: obj.GetName(),
|
||||
}, obj); err != nil {
|
||||
if attemps > 0 {
|
||||
time.Sleep(timeout)
|
||||
if err := WaitUntilCreated(ctx, client, obj, attemps-1, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Delete(ctx context.Context, client client.Client, obj client.Object, wait bool) error {
|
||||
if err := client.Delete(ctx, obj); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
package kube_test
|
||||
@@ -1,25 +0,0 @@
|
||||
package infra
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
proto "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/environments/v1"
|
||||
)
|
||||
|
||||
// Providers abstracts a supported infrastructure provider, translating
// between protobuf enum names ("raw") and provider-native identifiers.
type Providers interface {
	GetProviderName() string
	RawProviderName() string
	GetServerType(string) (string, error)
	GetServerLocation(string) (string, error)
	RawServerType(string) string
	RawServerLocation(string) string
}

// GetProvider resolves a provider identifier (protobuf enum name or
// short name) to its implementation.
func GetProvider(provider string) (Providers, error) {
	switch provider {
	case proto.Provider_PROVIDER_HETZNER.String(), "hetzner":
		return &Hetzner{}, nil
	default:
		return nil, fmt.Errorf("unknown provider: %s", provider)
	}
}
|
||||
@@ -1,100 +0,0 @@
|
||||
package infra
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
proto "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/environments/v1"
|
||||
)
|
||||
|
||||
// Hetzner implements Providers for Hetzner Cloud.
type Hetzner struct{}

// GetProviderName implements Providers.
func (h *Hetzner) GetProviderName() string {
	return "hetzner"
}

// RawProviderName implements Providers.
func (h *Hetzner) RawProviderName() string {
	return proto.Provider_PROVIDER_HETZNER.String()
}

// RawServerLocation implements Providers. It maps a Hetzner datacenter
// code to the protobuf location enum name, returning
// LOCATION_UNSPECIFIED for codes it does not know.
func (h *Hetzner) RawServerLocation(location string) string {
	switch location {
	case "ash":
		return proto.Location_LOCATION_HETZNER_ASHBURN.String()
	case "hil":
		return proto.Location_LOCATION_HETZNER_HILLSBORO.String()
	case "fsn1":
		return proto.Location_LOCATION_HETZNER_FALKENSTEIN.String()
	case "nbg1":
		return proto.Location_LOCATION_HETZNER_NUREMBERG.String()
	case "hel1":
		return proto.Location_LOCATION_HETZNER_HELSINKI.String()
	default:
		return proto.Location_LOCATION_UNSPECIFIED.String()
	}

}

// RawServerType implements Providers. It maps a Hetzner machine type to
// the protobuf server-type enum name, returning SERVER_TYPE_UNSPECIFIED
// for types it does not know.
func (h *Hetzner) RawServerType(kind string) string {
	switch kind {
	case "cpx21":
		return proto.ServerType_SERVER_TYPE_STARTER.String()
	case "cpx31":
		return proto.ServerType_SERVER_TYPE_REGULAR.String()
	case "cpx41":
		return proto.ServerType_SERVER_TYPE_PLUS.String()
	case "cpx51":
		return proto.ServerType_SERVER_TYPE_PRO.String()
	default:
		return proto.ServerType_SERVER_TYPE_UNSPECIFIED.String()
	}
}

// GetServerLocation implements Providers. It is the inverse of
// RawServerLocation: enum name in, Hetzner datacenter code out. Enum
// names that do not contain "HETZNER" are rejected up front.
func (h *Hetzner) GetServerLocation(location string) (string, error) {
	if !strings.Contains(location, "HETZNER") {
		return "", fmt.Errorf("location isn't supported by hetzner: %s", location)
	}
	switch location {
	case proto.Location_LOCATION_HETZNER_ASHBURN.String():
		return "ash", nil
	case proto.Location_LOCATION_HETZNER_HILLSBORO.String():
		return "hil", nil
	case proto.Location_LOCATION_HETZNER_FALKENSTEIN.String():
		return "fsn1", nil
	case proto.Location_LOCATION_HETZNER_NUREMBERG.String():
		return "nbg1", nil
	case proto.Location_LOCATION_HETZNER_HELSINKI.String():
		return "hel1", nil
	default:
		return "", fmt.Errorf("unknown location: %s", location)
	}
}

// GetServerType implements Providers. It is the inverse of
// RawServerType: enum name in, Hetzner machine type out. Custom server
// types are explicitly not supported yet.
func (h *Hetzner) GetServerType(kind string) (serverType string, err error) {
	switch kind {
	case proto.ServerType_SERVER_TYPE_STARTER.String():
		serverType = "cpx21"
		return
	case proto.ServerType_SERVER_TYPE_REGULAR.String():
		serverType = "cpx31"
		return
	case proto.ServerType_SERVER_TYPE_PLUS.String():
		serverType = "cpx41"
		return
	case proto.ServerType_SERVER_TYPE_PRO.String():
		serverType = "cpx51"
		return
	case proto.ServerType_SERVER_TYPE_CUSTOM.String():
		err = errors.New("custom server types are not supported yet")
		return
	default:
		err = fmt.Errorf("unknown server type: %s", kind)
		return
	}
}
|
||||
@@ -1,21 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
proto "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/environments/v1"
|
||||
)
|
||||
|
||||
type Kubernetes interface {
|
||||
GetKubernetesName() string
|
||||
RawKubernetesName() string
|
||||
}
|
||||
|
||||
func GetKubernetes(k8s string) (Kubernetes, error) {
|
||||
switch k8s {
|
||||
case proto.Kubernetes_KUBERNETES_K3S.String(), "k3s":
|
||||
return &K3s{}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown provider: %s", k8s)
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
proto "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/environments/v1"
|
||||
)
|
||||
|
||||
// K3s implements Kubernetes for the k3s distribution.
type K3s struct{}

// GetKubernetesName implements Kubernetes.
func (k *K3s) GetKubernetesName() string {
	return "k3s"
}

// RawKubernetesName implements Kubernetes.
func (k *K3s) RawKubernetesName() string {
	return proto.Kubernetes_KUBERNETES_K3S.String()
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package helmrelease
|
||||
|
||||
// Chart identifies a Helm chart by name and version.
type Chart struct {
	Name    string
	Version string
}

// Repo points at the repository a chart is fetched from.
type Repo struct {
	URL  string
	Type string // e.g. "oci" — TODO confirm the accepted values
}

// PrettyConfig is a single structured configuration entry addressed by
// a path into the values tree.
type PrettyConfig struct {
	Key   string
	Path  string
	Value string
}

// Helm ties a release name to its chart and repository.
type Helm struct {
	Release string
	Chart   Chart
	Repo    Repo
}

// Config carries release configuration: structured ("pretty") entries
// plus a raw values blob.
type Config struct {
	Pretty []PrettyConfig
	Raw    string
}

// HelmRelease is a Helm release together with its configuration.
type HelmRelease struct {
	Helm   Helm
	Config Config
}

// PrettyConfigSchema documents one PrettyConfig entry: a description
// and its path in the values tree.
type PrettyConfigSchema struct {
	Description string
	Path        string
}
|
||||
18
main.go
18
main.go
@@ -10,9 +10,8 @@ import (
|
||||
"time"
|
||||
|
||||
v1 "gitea.badhouseplants.net/softplayer/softplayer-backend/api/v1"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/authorization"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/controllers"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/tools/logger"
|
||||
"gitea.badhouseplants.net/softplayer/softplayer-backend/internal/helpers/logger"
|
||||
accounts "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/accounts/v1"
|
||||
test "gitea.badhouseplants.net/softplayer/softplayer-go-proto/pkg/test/v1"
|
||||
"github.com/alecthomas/kong"
|
||||
@@ -160,13 +159,19 @@ func server(ctx context.Context, params Serve) error {
|
||||
}
|
||||
|
||||
authReqServices := func(ctx context.Context, callMeta interceptors.CallMeta) bool {
|
||||
return !strings.Contains(callMeta.Service, "NoAuth")
|
||||
serviceParts := strings.Split(callMeta.Service, ".")
|
||||
if len(serviceParts) == 0 {
|
||||
return false
|
||||
}
|
||||
serviceName := serviceParts[len(serviceParts)-1]
|
||||
fmt.Println(serviceName)
|
||||
return !strings.HasPrefix(serviceName, "Public")
|
||||
}
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Addr: params.RedisHost,
|
||||
})
|
||||
|
||||
authInterceptor := authorization.NewAuthController(
|
||||
authInterceptor := controllers.NewAuthController(
|
||||
[]byte(params.JWTSecret),
|
||||
params.AccessTokenTTL,
|
||||
params.RefrestTokenTTL,
|
||||
@@ -195,8 +200,9 @@ func server(ctx context.Context, params Serve) error {
|
||||
JWTSecret: []byte(params.JWTSecret),
|
||||
Redis: rdb,
|
||||
}
|
||||
accounts.RegisterAccountsNoAuthServiceServer(grpcServer, v1.NewAccountNoAuthRPCImpl(accountCtrl, authInterceptor))
|
||||
accounts.RegisterAccountsAuthServiceServer(grpcServer, v1.NewAccountAuthRPCImpl(accountCtrl, authInterceptor))
|
||||
|
||||
accounts.RegisterPublicAccountsServiceServer(grpcServer, v1.NewAccountNoAuthRPCImpl(accountCtrl, authInterceptor))
|
||||
accounts.RegisterAccountsServiceServer(grpcServer, v1.NewAccountAuthRPCImpl(accountCtrl, authInterceptor))
|
||||
test.RegisterTestAuthServiceServer(grpcServer, v1.NewTestAuthRPCImpl())
|
||||
test.RegisterTestNoAuthServiceServer(grpcServer, v1.NewTestNoAuthRPCImpl())
|
||||
if err := grpcServer.Serve(lis); err != nil {
|
||||
|
||||
@@ -4,5 +4,5 @@ CREATE TABLE IF NOT EXISTS accounts (
|
||||
CHECK (email ~* '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$') UNIQUE,
|
||||
password_hash TEXT NOT NULL,
|
||||
email_verified BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
1
migrations/20260510174348_tokens_init.down.sql
Normal file
1
migrations/20260510174348_tokens_init.down.sql
Normal file
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS tokens;
|
||||
10
migrations/20260510174348_tokens_init.up.sql
Normal file
10
migrations/20260510174348_tokens_init.up.sql
Normal file
@@ -0,0 +1,10 @@
|
||||
-- tokens stores issued API tokens; only a hash of the token is kept.
CREATE TABLE IF NOT EXISTS tokens (
    uuid UUID PRIMARY KEY,
    token_hash TEXT NOT NULL,
    user_id UUID NOT NULL,
    -- Granted scopes as a JSON array.
    scopes JSONB NOT NULL DEFAULT '[]',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    last_used_at TIMESTAMPTZ, -- nullable; presumably set on use — confirm in app code
    revoked_at TIMESTAMPTZ,   -- nullable; presumably NULL while active — confirm in app code
    expires_at TIMESTAMPTZ    -- nullable; presumably NULL means no expiry — confirm in app code
);
|
||||
3
migrations/20260510175121_accounts_timestamptz.down.sql
Normal file
3
migrations/20260510175121_accounts_timestamptz.down.sql
Normal file
@@ -0,0 +1,3 @@
|
||||
ALTER TABLE accounts
|
||||
ALTER COLUMN created_at TYPE TIMESTAMP
|
||||
USING created_at AT TIME ZONE 'UTC';
|
||||
3
migrations/20260510175121_accounts_timestamptz.up.sql
Normal file
3
migrations/20260510175121_accounts_timestamptz.up.sql
Normal file
@@ -0,0 +1,3 @@
|
||||
ALTER TABLE accounts
|
||||
ALTER COLUMN created_at TYPE TIMESTAMPTZ
|
||||
USING created_at AT TIME ZONE 'UTC';
|
||||
Binary file not shown.
Reference in New Issue
Block a user