Skip to content

Commit

Permalink
tmp
Browse files Browse the repository at this point in the history
  • Loading branch information
stuartwdouglas committed Nov 27, 2024
1 parent 1edf244 commit 34e3416
Show file tree
Hide file tree
Showing 5 changed files with 15 additions and 17 deletions.
4 changes: 2 additions & 2 deletions backend/provisioner/runner_scaling_provisioner.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func provisionRunner(scaling scaling.RunnerScaling, client ftlv1connect.Controll
return nil, fmt.Errorf("failed to parse schema: %w", err)
}
logger.Debugf("provisioning runner: %s.%s for deployment %s", module, id, deployment)
err = scaling.StartDeployment(module, deployment, schema)
err = scaling.StartDeployment(ctx, module, deployment, schema)
if err != nil {
return nil, fmt.Errorf("failed to start deployment: %w", err)
}
Expand All @@ -63,7 +63,7 @@ func provisionRunner(scaling scaling.RunnerScaling, client ftlv1connect.Controll
DeploymentKey: deployment,
}
if previous != nil && previous.GetRunner().GetOutput().GetDeploymentKey() != deployment {
err := scaling.TerminateDeployment(module, previous.GetRunner().GetOutput().GetDeploymentKey())
err := scaling.TerminateDeployment(ctx, module, previous.GetRunner().GetOutput().GetDeploymentKey())
if err != nil {
logger.Errorf(err, "failed to terminate previous deployment")
}
Expand Down
12 changes: 4 additions & 8 deletions backend/provisioner/scaling/k8sscaling/k8s_scaling.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ const serviceAccountTemplate = "serviceAccountTemplate"
var _ scaling.RunnerScaling = &k8sScaling{}

type k8sScaling struct {
ctx context.Context
disableIstio bool
controller string

Expand All @@ -52,8 +51,7 @@ type k8sScaling struct {
istioSecurity optional.Option[istioclient.Clientset]
}

func (r k8sScaling) StartDeployment(module string, deploymentKey string, sch *schema.Module) error {
ctx := r.ctx
func (r k8sScaling) StartDeployment(ctx context.Context, module string, deploymentKey string, sch *schema.Module) error {
logger := log.FromContext(ctx)
logger = logger.Module(module)
ctx = log.ContextWithLogger(ctx, logger)
Expand Down Expand Up @@ -81,10 +79,10 @@ func (r k8sScaling) StartDeployment(module string, deploymentKey string, sch *sc
}
}

func (r k8sScaling) TerminateDeployment(module string, deploymentKey string) error {
logger := log.FromContext(r.ctx)
func (r k8sScaling) TerminateDeployment(ctx context.Context, module string, deploymentKey string) error {
logger := log.FromContext(ctx)
logger = logger.Module(module)
ctx := log.ContextWithLogger(r.ctx, logger)
ctx = log.ContextWithLogger(ctx, logger)
logger.Debugf("Handling schema change for %s", deploymentKey)
deploymentClient := r.client.AppsV1().Deployments(r.namespace)
_, err := deploymentClient.Get(ctx, deploymentKey, v1.GetOptions{})
Expand Down Expand Up @@ -117,7 +115,6 @@ func (r k8sScaling) TerminateDeployment(module string, deploymentKey string) err
}

func (r k8sScaling) Start(ctx context.Context) error {

logger := log.FromContext(ctx).Scope("K8sScaling")
ctx = log.ContextWithLogger(ctx, logger)
clientset, err := CreateClientSet()
Expand Down Expand Up @@ -150,7 +147,6 @@ func (r k8sScaling) Start(ctx context.Context) error {
}

logger.Debugf("Using namespace %s", namespace)
r.ctx = ctx
r.client = clientset
r.namespace = namespace
r.knownDeployments = xsync.NewMapOf[string, bool]()
Expand Down
4 changes: 2 additions & 2 deletions backend/provisioner/scaling/localscaling/local_scaling.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ type localScaling struct {
devModeEndpoints map[string]*devModeRunner
}

func (l *localScaling) StartDeployment(module string, deployment string, sch *schema.Module) error {
func (l *localScaling) StartDeployment(ctx context.Context, module string, deployment string, language *schema.Module) error {
if sch.Runtime == nil {
return nil
}
Expand Down Expand Up @@ -83,7 +83,7 @@ func (l *localScaling) setReplicas(module string, deployment string, language st
return l.reconcileRunners(ctx, deploymentRunners)
}

func (l *localScaling) TerminateDeployment(module string, deployment string) error {
// TerminateDeployment removes the runners for the given module deployment by
// scaling it down to zero replicas. The ctx parameter exists to satisfy the
// RunnerScaling interface; local scale-down itself performs no cancellable work.
func (l *localScaling) TerminateDeployment(ctx context.Context, module string, deployment string) error {
	const (
		noLanguage = "" // presumably setReplicas ignores language on teardown — verify against setReplicas
		noReplicas = 0
	)
	return l.setReplicas(module, deployment, noLanguage, noReplicas)
}

Expand Down
4 changes: 2 additions & 2 deletions backend/provisioner/scaling/scaling.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ type RunnerScaling interface {

GetEndpointForDeployment(ctx context.Context, module string, deployment string) (optional.Option[url.URL], error)

StartDeployment(module string, deployment string, language *schema.Module) error
StartDeployment(ctx context.Context, module string, deployment string, sch *schema.Module) error

TerminateDeployment(module string, deployment string) error
TerminateDeployment(ctx context.Context, module string, deployment string) error
}
8 changes: 5 additions & 3 deletions deployment/Justfile
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,9 @@ setup-cluster: setup-registry
if [ -z "$(k3d cluster list | grep ftl)" ]; then
k3d cluster create ftl --api-port 6550 -p "8892:80@loadbalancer" -p "8891:80@loadbalancer" -p "8893:80@loadbalancer" --agents 2 \
--registry-use {{registry_full}} \
--registry-config '{{mirrors}}'
--registry-config '{{mirrors}}'\
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
fi
kubectl config set-context --current --namespace=default

Expand All @@ -60,8 +62,8 @@ setup-istio-cluster: setup-cluster
kubectl create namespace istio-system
helm repo add istio https://istio-release.storage.googleapis.com/charts
helm repo update
helm install istio-base istio/base -n istio-system --wait
helm install istiod istio/istiod -n istio-system --wait
helm install istio-base istio/base -n istio-system --wait --version 1.23.3
helm install istiod istio/istiod -n istio-system --wait --version 1.23.3
fi
kubectl label namespace default istio-injection=enabled --overwrite
kubectl apply -f istio-access-logs.yaml
Expand Down

0 comments on commit 34e3416

Please sign in to comment.