diff --git a/config-ui/config_ui/api.py b/config-ui/config_ui/api.py index aab31e8..ff3fe25 100644 --- a/config-ui/config_ui/api.py +++ b/config-ui/config_ui/api.py @@ -19,6 +19,9 @@ def __init__(self, **config): self.app = web.Application(middlewares=[]) self.app.add_routes([web.post("/api/generate", self.generate)]) + self.app.add_routes([ + web.post("/api/generate/{platform}/{version}", self.generate) + ]) self.app.add_routes([web.get("/{tail:.*}", self.everything)]) self.ui = importlib.resources.files().joinpath("ui") @@ -84,12 +87,15 @@ async def everything(self, request): raise web.HTTPInternalServerError() def process( - self, config, version="0.12.5", platform="docker-compose", + self, config, version="0.0.0", platform="docker-compose", ): config = config.encode("utf-8") - gen = Generator(config, base=self.templates, version=version) + gen = Generator( + config, templates=self.templates, resources=self.resources, + version=version + ) path = self.templates.joinpath( f"config-to-{platform}.jsonnet" @@ -104,6 +110,18 @@ async def generate(self, request): logger.info("Generate...") + try: + platform = request.match_info["platform"] + except: + platform = "docker-compose" + + try: + version = request.match_info["version"] + except: + version = "0.0.0" + + logger.info(f"Generating for platform={platform} version={version}") + try: config = await request.text() @@ -115,62 +133,108 @@ async def generate(self, request): config = json.dumps(dec) except: # Incorrectly formatted stuff is not our problem, + logger.info(f"Bad JSON") return web.HTTPBadRequest() logger.info(f"Config: {config}") - processed = self.process(config) - y = yaml.dump(processed) - mem = BytesIO() + if platform in set(["docker-compose", "podman-compose"]): + return await self.generate_docker_compose( + "docker-compose", version, config + ) + elif platform in set(["minikube-k8s", "gcp-k8s"]): + return await self.generate_k8s( + platform, version, config + ) + else: + return web.HTTPBadRequest() - with zipfile.ZipFile(mem, mode='w') as out: + except Exception as e: + logging.error(f"Exception: {e}") + return web.HTTPInternalServerError() - def output(name, content): - logger.info(f"Adding {name}...") - out.writestr(name, content) + async def generate_docker_compose(self, platform, version, config): - fname = "docker-compose.yaml" + processed = self.process( + config, platform=platform, version=version + ) - output(fname, y) + y = yaml.dump(processed) - # Grafana config - path = self.resources.joinpath( - "grafana/dashboards/dashboard.json" - ) - res = path.read_text() - output("grafana/dashboards/dashboard.json", res) + mem = BytesIO() - path = self.resources.joinpath( - "grafana/provisioning/dashboard.yml" - ) - res = path.read_text() - output("grafana/provisioning/dashboard.yml", res) + with zipfile.ZipFile(mem, mode='w') as out: - path = self.resources.joinpath( - "grafana/provisioning/datasource.yml" - ) - res = path.read_text() - output("grafana/provisioning/datasource.yml", res) + def output(name, content): + logger.info(f"Adding {name}...") + out.writestr(name, content) - # Prometheus config - path = self.resources.joinpath( - "prometheus/prometheus.yml" - ) - res = path.read_text() - output("prometheus/prometheus.yml", res) + fname = "docker-compose.yaml" - logger.info("Generation complete.") + output(fname, y) - return web.Response( - body=mem.getvalue(), - content_type = "application/octet-stream" + # Grafana config + path = self.resources.joinpath( + "grafana/dashboards/dashboard.json" ) + res = path.read_text() 
+ output("grafana/dashboards/dashboard.json", res) - except Exception as e: - logging.error(f"Exception: {e}") - return web.HTTPInternalServerError() + path = self.resources.joinpath( + "grafana/provisioning/dashboard.yml" + ) + res = path.read_text() + output("grafana/provisioning/dashboard.yml", res) + + path = self.resources.joinpath( + "grafana/provisioning/datasource.yml" + ) + res = path.read_text() + output("grafana/provisioning/datasource.yml", res) + + # Prometheus config + path = self.resources.joinpath( + "prometheus/prometheus.yml" + ) + res = path.read_text() + output("prometheus/prometheus.yml", res) + + logger.info("Generation complete.") + + return web.Response( + body=mem.getvalue(), + content_type = "application/octet-stream" + ) + + async def generate_k8s(self, platform, version, config): + + processed = self.process( + config, platform=platform, version=version + ) + + y = yaml.dump(processed) + + mem = BytesIO() + + with zipfile.ZipFile(mem, mode='w') as out: + + def output(name, content): + logger.info(f"Adding {name}...") + out.writestr(name, content) + + fname = "resources.yaml" + + output(fname, y) + + logger.info("Generation complete.") + + return web.Response( + body=mem.getvalue(), + content_type = "application/octet-stream" + ) def run(self): web.run_app(self.app, port=self.port) + diff --git a/config-ui/config_ui/generator.py b/config-ui/config_ui/generator.py index 0d0ec4c..2ef3f5c 100644 --- a/config-ui/config_ui/generator.py +++ b/config-ui/config_ui/generator.py @@ -11,14 +11,19 @@ class Generator: def __init__( - self, config, base=None, resources=None, + self, config, templates=None, resources=None, version="0.0.0" ): - if base: - self.base = base + if templates: + self.templates = templates else: - self.base = pathlib.Path("templates") + self.templates = pathlib.Path("templates") + + if resources: + self.resources = resources + else: + self.resources = pathlib.Path("resources") self.config = config self.version = f"\"{version}\"".encode("utf-8") @@ -33,21 +38,23 @@ def load(self, dir, filename): logger.debug("Request jsonnet: %s %s", dir, filename) if filename == "config.json" and dir == "": - path = self.base.joinpath(dir, filename) + path = self.templates.joinpath(dir, filename) return str(path), self.config if filename == "version.jsonnet": - path = self.base.joinpath(dir, filename) + path = self.templates.joinpath(dir, filename) return str(path), self.version if dir: candidates = [ - self.base.joinpath(dir, filename), - self.base.joinpath(filename) + self.templates.joinpath(dir, filename), + self.templates.joinpath(filename), + self.resources.joinpath(dir, filename), + self.resources.joinpath(filename), ] else: candidates = [ - self.base.joinpath(filename) + self.templates.joinpath(filename) ] try: @@ -70,7 +77,7 @@ def load(self, dir, filename): except: - path = os.path.join(self.base, filename) + path = os.path.join(self.templates, filename) logger.debug("Try: %s", path) with open(path, "rb") as f: logger.debug("Loaded: %s", path) diff --git a/src/simple-editor/SimpleEditor.scss b/src/simple-editor/SimpleEditor.scss index fc7f1e7..f735be1 100644 --- a/src/simple-editor/SimpleEditor.scss +++ b/src/simple-editor/SimpleEditor.scss @@ -17,6 +17,7 @@ } .deployment { + max-width: 55rem; } .variable { @@ -28,3 +29,4 @@ pre { padding: 1rem; border: 1px solid #e0e0e0; } + diff --git a/src/simple-editor/deployment/Deployment.tsx b/src/simple-editor/deployment/Deployment.tsx index b9e5114..c6cd7fe 100644 --- a/src/simple-editor/deployment/Deployment.tsx +++ 
b/src/simple-editor/deployment/Deployment.tsx @@ -5,6 +5,7 @@ import { Box } from '@mui/material'; //import { useModelParamsStore } from '../state/ModelParams'; +import DeploymentPlatform from './DeploymentPlatform'; import DeploymentModel from './DeploymentModel'; import DeploymentConfig from './DeploymentConfig'; import DeploymentInstructions from './DeploymentInstructions'; @@ -19,16 +20,24 @@ const Deployment: React.FC = ({ <> - - - + - - - + + + + + + + + + + + + + + + - - diff --git a/src/simple-editor/deployment/DeploymentCode.tsx b/src/simple-editor/deployment/DeploymentCode.tsx new file mode 100644 index 0000000..102f966 --- /dev/null +++ b/src/simple-editor/deployment/DeploymentCode.tsx @@ -0,0 +1,18 @@ + +import React from 'react' + +interface DeploymentCodeProps extends React.PropsWithChildren { + children : React.ReactNode; +}; + +const DeploymentCode : React.FC = +({children}) => { + return ( +
+            {children}
+        
+ ); +} + +export default DeploymentCode; + diff --git a/src/simple-editor/deployment/DeploymentEnvVars.tsx b/src/simple-editor/deployment/DeploymentEnvVars.tsx new file mode 100644 index 0000000..d087fb7 --- /dev/null +++ b/src/simple-editor/deployment/DeploymentEnvVars.tsx @@ -0,0 +1,35 @@ + +import React from 'react' + +interface DeploymentEnvVarsProps { + variables : { + name : string; + value : string; + }[]; +}; + +const DeploymentEnvVars : React.FC = +({variables}) => { + + return ( +
+            {
+                variables.map(
+                    (va) => {
+                        return (
+                            
+                                export {va.name}=
+                                    {va.value}
+                                
+                                
+
+ ); + } + ) + } +
+ ); +} + +export default DeploymentEnvVars; + diff --git a/src/simple-editor/deployment/DeploymentInstructions.tsx b/src/simple-editor/deployment/DeploymentInstructions.tsx index ff68360..1b00434 100644 --- a/src/simple-editor/deployment/DeploymentInstructions.tsx +++ b/src/simple-editor/deployment/DeploymentInstructions.tsx @@ -4,25 +4,26 @@ import React from 'react'; import { Typography, Box, Paper, Stack, } from '@mui/material'; - import { RocketLaunch } from '@mui/icons-material'; -interface DeploymentInstructionsProps { -} +import { useModelParamsStore } from '../state/ModelParams'; +import DeploymentCode from './DeploymentCode'; +import DeploymentStep from './DeploymentStep'; -const DeploymentInstructions: React.FC = ({ -}) => { +const DeploymentInstructionsCompose = () => { + + const platform = useModelParamsStore((state) => state.platform); return ( <> - @@ -30,42 +31,99 @@ const DeploymentInstructions: React.FC = ({ - + When you download the deploy configuration, you will have a ZIP file containing all the configuration - needed to launch TrustGraph in Docker Compose or - Podman Compose. + needed to launch TrustGraph in + { + platform == "podman-compose" ? + " Podman Compose" : + " Docker Compose" + }. Unzip the ZIP file... - + -
unzip deploy.zip
-                    
+ + unzip deploy.zip + - + and launch... + - - -
docker compose -f docker-compose.yaml up -d
-                    
+ + {platform == "podman-compose" ? "podman compose" : "docker compose" } -f docker-compose.yaml up -d + - + If you are on Linux, running SELinux, you may need to change permissions on files in the deploy bundle so that they are accessible from within containers. This affects the grafana and prometheus directories. - + -
chcon -Rt svirt_sandbox_file_t grafana prometheus
- chmod 755 prometheus/ grafana/ grafana/*/
- chmod 644 prometheus/* grafana/*/* -
+ + chcon -Rt svirt_sandbox_file_t grafana prometheus
+ chmod 755 prometheus/ grafana/ grafana/*/
+ chmod 644 prometheus/* grafana/*/* +
+ +
+
+ + + + ); + +} + +const DeploymentInstructionsKube = () => { + + return ( + + <> + + + + + + + Launch + + + + + + When you download the deploy configuration, you will + have a ZIP file containing all the configuration + needed to launch TrustGraph on Kubernetes. + Unzip the ZIP file... + + + + + unzip deploy.zip + + + + + and launch... + + + + + kubectl apply -f resources.yaml + @@ -74,6 +132,18 @@ const DeploymentInstructions: React.FC = ({ ); +} + +const DeploymentInstructions: React.FC<{}> = ({ +}) => { + + const platform = useModelParamsStore((state) => state.platform); + + if (platform == "docker-compose" || platform == "podman-compose") + return ; + else + return ; + }; export default DeploymentInstructions; diff --git a/src/simple-editor/deployment/DeploymentModel.tsx b/src/simple-editor/deployment/DeploymentModel.tsx index 8eaf093..10dfb05 100644 --- a/src/simple-editor/deployment/DeploymentModel.tsx +++ b/src/simple-editor/deployment/DeploymentModel.tsx @@ -8,124 +8,29 @@ import { import { useModelParamsStore } from '../state/ModelParams'; -const getInstructions = (model : string) => { - if (model == "claude") { - return <> - - To use Anthropic Claude, you need a Claude API key. - Provide the Claude API key in an environment variable - when runnging the Docker Compose configuration. - -
export CLAUDE_KEY=
-            TOKEN-GOES-HERE
-            
- ; - } else if (model == "bedrock") { - return <> - - To use AWS Bedrock, you must have enabled models in the - AWS Bedrock console. You must also provide an - AWS access key ID and secret key. - -
export AWS_ID_KEY=
-            ID-KEY-HERE
-            
- export AWS_SECRET_KEY= - TOKEN-GOES-HERE -
- ; - } else if (model == "azure") { - return <> - - To use Azure Serverless APIs, you need to have a serverless - endpoint deployed, and you must also provide an endpoint - token as an environment variable. - -
export AZURE_ENDPOINT=
-            https://ENDPOINT.API.HOST.GOES.HERE/
-            
- export AZURE_TOKEN= - TOKEN-GOES-HERE -
- ; - } else if (model == "cohere") { - return <> - To use Cohere APIs, you need an API token which must - be provided in an environment variable. -
export COHERE_KEY=
-            TOKEN-GOES-HERE
-            
- ; - } else if (model == "llamafile") { - return <> - To use Llamafile, you must have a Llamafile services running - on an accessible host. The Llamafile host must be provided - in an environment variable. -
export LLAMAFILE_URL=
-            LLAMAFILE-URL
-            
- ; - } else if (model == "ollama") { - return <> - - The power of Ollama is the flexibility it provides in - Language Model deployments. Being able to run LMs with - Ollama enables fully secure AI TrustGraph pipelines - that aren't relying on any external APIs. No data is - leaving the host environment or network. - The Ollama service must be running, and have required - models available using ollama pull. - The Ollama service URL must be provided in an environment - variable. - -
export OLLAMA_HOST=
-            http://ollama-host:11434
-            
- - Replace the URL with the URL of your Ollama service. - - ; - } else if (model == "openai") { - return <> - To use OpenAI APIs, you need an API token which must - be provided in an environment variable. -
export OPENAI_KEY=
-            TOKEN-GOES-HERE
-            
- ; - } else if (model == "vertexai") { - return <> - To use VertexAI, you need to have a Google Cloud credential - file provisioned for a service account which has access to the - VertexAI services. This means signing up to GCP and using - an existing, or launching a new GCP project. - The GCP credential will be a JSON file - which should be stored in vertexai/private.json. - - - The credential file is mounted as a volume in Docker Compose, - which can cause issues with SELinux if you are running on Linux. - Make sure that Docker has access to volume files if this - affects you. - -
chcon -Rt svirt_sandbox_file_t vertexai/
-            
- ; +import DeploymentModelCompose from './DeploymentModelCompose'; +import DeploymentModelKube from './DeploymentModelKube'; + +interface DeploymentModelProps { +} + +const Platform = () => { + + const platform = useModelParamsStore((state) => state.platform); + + if (platform == "docker-compose" || platform == "podman-compose") { + return ; + } else if (platform == "minikube-k8s" || platform == "gcp-k8s") { + return ; } else { - return <> ; + return
Bunch
; } - -} -interface DeploymentModelProps { } const DeploymentModel: React.FC = ({ }) => { - const modelDeployment = useModelParamsStore((state) => state.modelDeployment); - - const instructions = getInstructions(modelDeployment); return ( @@ -136,12 +41,13 @@ const DeploymentModel: React.FC = ({ Model credentials - {instructions} +
diff --git a/src/simple-editor/deployment/DeploymentModelCompose.tsx b/src/simple-editor/deployment/DeploymentModelCompose.tsx
new file mode 100644
index 0000000..e3c988f
--- /dev/null
+++ b/src/simple-editor/deployment/DeploymentModelCompose.tsx
@@ -0,0 +1,194 @@
+
+import React from 'react';
+
+import { Typography } from '@mui/material';
+
+import { useModelParamsStore } from '../state/ModelParams';
+
+import DeploymentEnvVars from './DeploymentEnvVars';
+import DeploymentCode from './DeploymentCode';
+import DeploymentStep from './DeploymentStep';
+
+const getInstructions = (model : string) => {
+ if (model == "claude") {
+ return <>
+
+ To use Anthropic Claude, you need a Claude API key.
+ Provide the Claude API key in an environment variable
+ when running the Docker Compose configuration.
+
+
+
+
+ ;
+ } else if (model == "bedrock") {
+ return <>
+
+ To use AWS Bedrock, you must have enabled models in the
+ AWS Bedrock console. You must also provide an
+ AWS access key ID and secret key.
+
+
+
+
+ ;
+ } else if (model == "azure") {
+ return <>
+
+ To use Azure Serverless APIs, you need to have a serverless
+ endpoint deployed, and you must also provide an endpoint
+ token as an environment variable.
+
+
+
+
+
+ ;
+ } else if (model == "cohere") {
+ return <>
+ To use Cohere APIs, you need an API token which must
+ be provided in an environment variable.
+
+
+
+
+ ;
+ } else if (model == "llamafile") {
+ return <>
+ To use Llamafile, you must have a Llamafile service running
+ on an accessible host. The Llamafile host must be provided
+ in an environment variable.
+
+
+
+
+ ;
+ } else if (model == "ollama") {
+ return <>
+
+ The power of Ollama is the flexibility it provides in
+ Language Model deployments. Being able to run LMs with
+ Ollama enables fully secure AI TrustGraph pipelines
+ that aren't relying on any external APIs. No data is
+ leaving the host environment or network.
+ The Ollama service must be running, and have required
+ models available using ollama pull.
+ The Ollama service URL must be provided in an environment
+ variable.
+
+
+
+
+
+ Replace the URL with the URL of your Ollama service.
+
+ ;
+ } else if (model == "openai") {
+ return <>
+ To use OpenAI APIs, you need an API token which must
+ be provided in an environment variable.
+
+
+
+ ;
+ } else if (model == "vertexai") {
+ return <>
+
+ To use VertexAI, you need to have a Google Cloud credential
+ file provisioned for a service account which has access to the
+ VertexAI services. This means signing up to GCP and using
+ an existing GCP project, or launching a new one.
+ The GCP credential will be a JSON file
+ which should be stored in vertexai/private.json.
+
+
+ The credential file is mounted as a volume in Docker Compose,
+ which can cause issues with SELinux if you are running on Linux.
+ Make sure that Docker has access to volume files if this
+ affects you.
+ + + chcon -Rt svirt_sandbox_file_t vertexai/ + + ; + } else { + return <> ; + } + +} + +const DeploymentModel: React.FC<{}> = ({ +}) => { + + const modelDeployment = useModelParamsStore((state) => state.modelDeployment); + + const instructions = getInstructions(modelDeployment); + + return ( + + <> + {instructions} + + + ); +}; + +export default DeploymentModel; + diff --git a/src/simple-editor/deployment/DeploymentModelKube.tsx b/src/simple-editor/deployment/DeploymentModelKube.tsx new file mode 100644 index 0000000..dd6f8e1 --- /dev/null +++ b/src/simple-editor/deployment/DeploymentModelKube.tsx @@ -0,0 +1,164 @@ + +import React from 'react'; + +import { Typography, Alert } from '@mui/material'; + +import { useModelParamsStore } from '../state/ModelParams'; + +import DeploymentCode from './DeploymentCode'; +import DeploymentStep from './DeploymentStep'; + +const getInstructions = (model : string) => { + if (model == "claude") { + return <> + + To use Anthropic Claude, you need a Claude API key. + Provide the Claude API key in a Kubernetes secret + before deploying the application. + + + + + kubectl -n trustgraph create secret \
+ {' '}generic claude-credentials \
+ {' '}--from-literal=claude-key=CLAUDE_KEY +
+ + ; + } else if (model == "bedrock") { + return <> + + To use AWS Bedrock, you must have enabled models in the + AWS Bedrock console. You must also provide an + AWS access key ID and secret key as a Kubernetes secret + before deploying the application. + + + + kubectl -n trustgraph create secret \
+ {' '}generic bedrock-credentials \
+ {' '}--from-literal=aws-id-key=AWS-ID-KEY \
+ {' '}--from-literal=aws-secret-key=AWS-SECRET-KEY +
+ + ; + } else if (model == "azure") { + return <> + + To use Azure Serverless APIs, you need to have a serverless + endpoint deployed. You must also provide + an Azure endpoint and token in a Kubernetes secret before + launching the application. + + + + kubectl -n trustgraph create secret \
+ {' '}generic azure-credentials \
+ {' '}--from-literal=azure-endpoint=AZURE-ENDPOINT \
+ {' '}--from-literal=azure-token=AZURE-TOKEN +
+ + ; + } else if (model == "cohere") { + return <> + To use Cohere APIs, you need an API token which must + be provided in a Kubernetes secret. + + + kubectl -n trustgraph create secret \
+ {' '}generic cohere-credentials \
+ {' '}--from-literal=cohere-key=COHERE-KEY +
+ + ; + } else if (model == "llamafile") { + return <> + To use Llamafile, you must have a Llamafile services running + on an accessible host. The Llamafile host must be provided + in a Kubernetes secret. + + + kubectl -n trustgraph create secret \
+ {' '}generic llamafile-credentials \
+ {' '}--from-literal=llamafile-url=http://llamafile:1234/ +
+ + ; + } else if (model == "ollama") { + return <> + + The Ollama service URL must be provided in a Kubernetes + secret. + + + + kubectl -n trustgraph \
+ {' '}create secret generic ollama-credentials \
+ {' '}--from-literal=ollama-host=http://ollama:11434/ +
+ + + Replace the URL with the URL of your Ollama service. + + ; + } else if (model == "openai") { + return <> + To use OpenAI APIs, you need an API token which must + be provided in a Kubernetes secret. + + + kubectl -n trustgraph create secret \
+ {' '}generic openai-credentials \
+ {' '}--from-literal=openai-key=OPENAI-KEY +
+ + ; + } else if (model == "vertexai") { + return <> + + To use VertexAI, you need to have a Google Cloud credential + file provisioned for a service account which has access to the + VertexAI services. This means signing up to GCP and using + an existing, or launching a new GCP project. + The GCP credential will be a JSON file + which would arrive in a file called private.json. + + + The private.json file should be loaded into Kubernetes as a + secret. + + + kubectl -n trustgraph create secret \
+ {' '}generic vertexai-creds --from-file=private.json=private.json +
+
+ Google Cloud private.json files are secrets which potentially
+ provide access to all of your Google Cloud resources.
+ Take great care to ensure that the permissions of the
+ account are minimal, ideally scoped to just AI services.
+
+ ;
+ } else {
+ return <> ;
+ }
+
+}
+
+const DeploymentModelKube: React.FC<{}> = ({
+}) => {
+
+ const modelDeployment = useModelParamsStore((state) => state.modelDeployment);
+
+ const instructions = getInstructions(modelDeployment);
+
+ return (
+
+ <>
+ {instructions}
+
+
+ );
+};
+
+export default DeploymentModelKube;
+
diff --git a/src/simple-editor/deployment/DeploymentPlatform.tsx b/src/simple-editor/deployment/DeploymentPlatform.tsx
new file mode 100644
index 0000000..617114c
--- /dev/null
+++ b/src/simple-editor/deployment/DeploymentPlatform.tsx
@@ -0,0 +1,97 @@
+
+import React from 'react';
+
+import {
+ Alert
+} from '@mui/material';
+import { Hub } from '@mui/icons-material';
+
+import { useModelParamsStore } from '../state/ModelParams';
+
+import DeploymentStep from './DeploymentStep';
+import DeploymentSection from './DeploymentSection';
+
+const getSteps = (model : string) => {
+ if (model == "docker-compose") {
+ return <>
+
+ You need to have Docker Compose installed.
+ See
+ Installing Docker Compose
+ .
+
+ ;
+ } else if (model == "podman-compose") {
+ return <>
+
+ You need to have the Podman environment and Podman Compose
+ installed. This should be available with your Linux
+ distribution.
+ See
+ Beginner's Guide to Using Podman Compose
+ .
+
+ ;
+ } else if (model == "minikube-k8s") {
+ return <>
+
+ You need to have a Minikube cluster installed and
+ running.
+ See
+ Minikube - Get Started!
+ . There is TrustGraph documentation on
+ Minikube here.
+
+ ;
+ } else if (model == "gcp-k8s") {
+ return <>
+
+ You need to have a Google Cloud account, and a
+ running GKE cluster. You also need to be authenticated
+ with the cluster and be able to see the cluster
+ state.
+ See Google Kubernetes Engine (GKE).
+
+ ;
+ } else if (model == "pulumi-aws-ecs") {
+ return <>
+
+ This is not properly implemented or documented.
+ + ; + } else { + return <> ; + } + +} + +interface DeploymentInstructionsProps { +} + +const DeploymentInstructions: React.FC = ({ +}) => { + + const platform = useModelParamsStore((state) => state.platform); + + const instructions = getSteps(platform); + + return ( + + <> + } + title="Platform preparation" + children={instructions} + /> + + + ); + +}; + +export default DeploymentInstructions; + diff --git a/src/simple-editor/deployment/DeploymentSection.tsx b/src/simple-editor/deployment/DeploymentSection.tsx new file mode 100644 index 0000000..eee2fce --- /dev/null +++ b/src/simple-editor/deployment/DeploymentSection.tsx @@ -0,0 +1,42 @@ + +import React from 'react' +import { Typography, Box, Paper, Stack } from '@mui/material'; + +interface DeploymentSectionProps extends React.PropsWithChildren { + avatar : React.ReactNode; + title : string; + children : React.ReactNode; +}; + +const DeploymentSection : React.FC = ({ + avatar, title, children +}) => { + return ( + + + + + {avatar} + {title} + + + {children} + + + ); + +} + +export default DeploymentSection; + diff --git a/src/simple-editor/deployment/DeploymentStep.tsx b/src/simple-editor/deployment/DeploymentStep.tsx new file mode 100644 index 0000000..e467940 --- /dev/null +++ b/src/simple-editor/deployment/DeploymentStep.tsx @@ -0,0 +1,19 @@ + +import React from 'react' + +import { Typography } from '@mui/material'; + +interface DeploymentStepProps extends React.PropsWithChildren { + children : React.ReactNode; +}; + +const DeploymentStep : React.FC = ({children}) => { + return ( + + {children} + + ); +} + +export default DeploymentStep; + diff --git a/src/simple-editor/deployment/PreparedConfig.tsx b/src/simple-editor/deployment/PreparedConfig.tsx index d0658e0..44d252d 100644 --- a/src/simple-editor/deployment/PreparedConfig.tsx +++ b/src/simple-editor/deployment/PreparedConfig.tsx @@ -47,20 +47,22 @@ const PreparedConfig = () => { Deployment configuration -
    -
  • Model deployment: {modelDeployment}
  • -
  • Model name: {modelName}
  • -
  • Graph store: {graphStore}
  • -
  • Vector DB: {vectorDB}
  • -
+ } sx={{ mt: 1, mb: 2}} severity="success" > Configuration generation was successful - diff --git a/src/simple-editor/generate-config.ts b/src/simple-editor/generate-config.ts index 8d620f2..716e0b7 100644 --- a/src/simple-editor/generate-config.ts +++ b/src/simple-editor/generate-config.ts @@ -104,8 +104,11 @@ export const generateConfig = const cnf = JSON.stringify(config, null, 4) + const platform = params.platform; + const version = params.trustgraphVersion; + return fetch( - "/api/generate", { + "/api/generate/" + platform + "/" + version, { body: cnf, method: "POST", headers: { diff --git a/src/simple-editor/model-params/ParamsForm.tsx b/src/simple-editor/model-params/ParamsForm.tsx index 78ff5e7..11c5a02 100644 --- a/src/simple-editor/model-params/ParamsForm.tsx +++ b/src/simple-editor/model-params/ParamsForm.tsx @@ -6,6 +6,7 @@ import { Box } from '@mui/material'; import GraphStore from './GraphStore'; import VectorDB from './VectorDB'; import Chunker from './Chunker'; +import Platform from './Platform'; import ModelDeployment from './ModelDeployment'; import ModelParameters from './ModelParameters'; @@ -52,6 +53,9 @@ const ParamsForm: React.FC = ({ const temperature = useModelParamsStore((state) => state.temperature); + const platform + = useModelParamsStore((state) => state.platform); + const maxOutputTokens = useModelParamsStore((state) => state.maxOutputTokens); @@ -82,6 +86,9 @@ const ParamsForm: React.FC = ({ const setMaxOutputTokens = useModelParamsStore((state) => state.setMaxOutputTokens); + const setPlatform + = useModelParamsStore((state) => state.setPlatform); + useModelParamsStore.subscribe( (n, o) => { @@ -103,6 +110,12 @@ const ParamsForm: React.FC = ({ + + + + void; +} + +const Platform: React.FC = ({ + value, onChange +}) => { + + return ( + + + + Platform + + + + + ); +}; + +export default Platform; + diff --git a/src/simple-editor/state/ModelParams.ts b/src/simple-editor/state/ModelParams.ts index 6a5f786..02aac84 100644 --- a/src/simple-editor/state/ModelParams.ts +++ b/src/simple-editor/state/ModelParams.ts @@ -1,4 +1,6 @@ +const TRUSTGRAPH_VERSION = "0.13.0"; + import { create } from 'zustand' export interface ModelParams { @@ -12,6 +14,8 @@ export interface ModelParams { modelName : string; temperature : number; maxOutputTokens : number; + platform : string; + trustgraphVersion : string; setGraphStore : (v : string) => void; setVectorDB : (v : string) => void; @@ -22,6 +26,7 @@ export interface ModelParams { setModelName : (v : string) => void; setTemperature : (v : number) => void; setMaxOutputTokens : (v : number) => void; + setPlatform : (v : string) => void; } @@ -37,6 +42,8 @@ export const useModelParamsStore = create()( modelName: "gemma2:9b", temperature: 0.3, maxOutputTokens: 1000, + platform: "docker-compose", + trustgraphVersion: TRUSTGRAPH_VERSION, setGraphStore: (v) => set(() => ({ graphStore: v, @@ -74,6 +81,10 @@ export const useModelParamsStore = create()( maxOutputTokens: v, })), + setPlatform: (v) => set(() => ({ + platform: v + })), + }) ); diff --git a/templates/components/azure.jsonnet b/templates/components/azure.jsonnet index 3ee819e..aacbeac 100644 --- a/templates/components/azure.jsonnet +++ b/templates/components/azure.jsonnet @@ -5,8 +5,6 @@ local prompts = import "prompts/mixtral.jsonnet"; { - "azure-token":: "${AZURE_TOKEN}", - "azure-endpoint":: "${AZURE_ENDPOINT}", "azure-max-output-tokens":: 4096, "azure-temperature":: 0.0, @@ -14,6 +12,10 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) + local 
envSecrets = engine.envSecrets("azure-credentials") + .with_env_var("AZURE_TOKEN", "azure-token") + .with_env_var("AZURE_ENDPOINT", "azure-endpoint"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -21,48 +23,22 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-azure", "-p", url.pulsar, - "-k", - $["azure-token"], - "-e", - $["azure-endpoint"], "-x", std.toString($["azure-max-output-tokens"]), "-t", std.toString($["azure-temperature"]), ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - local service = - engine.internalService(containerSet) - .with_port(8000, 8000, "metrics"); - - engine.resources([ - containerSet, - service, - ]) - - }, - - "text-completion-rag" +: { - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ "text-completion-azure", "-p", url.pulsar, - "-k", - $["azure-token"], - "-e", - $["azure-endpoint"], "-x", std.toString($["azure-max-output-tokens"]), "-t", @@ -72,23 +48,34 @@ local prompts = import "prompts/mixtral.jsonnet"; "-o", "non-persistent://tg/response/text-completion-rag-response", ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ container ] + ); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] ); local service = engine.internalService(containerSet) .with_port(8000, 8000, "metrics"); + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8000, 8000, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, service, + serviceRag, ]) - } } + prompts diff --git a/templates/components/bedrock.jsonnet b/templates/components/bedrock.jsonnet index 1c37562..11f112e 100644 --- a/templates/components/bedrock.jsonnet +++ b/templates/components/bedrock.jsonnet @@ -6,9 +6,6 @@ local chunker = import "chunker-recursive.jsonnet"; { - "aws-id-key":: "${AWS_ID_KEY}", - "aws-secret-key":: "${AWS_SECRET_KEY}", - "aws-region":: "us-west-2", "bedrock-max-output-tokens":: 4096, "bedrock-temperature":: 0.0, "bedrock-model":: "mistral.mixtral-8x7b-instruct-v0:1", @@ -17,6 +14,11 @@ local chunker = import "chunker-recursive.jsonnet"; create:: function(engine) + local envSecrets = engine.envSecrets("bedrock-credentials") + .with_env_var("AWS_ID_KEY", "aws-id-key") + .with_env_var("AWS_SECRET", "aws-secret") + .with_env_var("AWS_REGION", "aws-region"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -24,12 +26,6 @@ local chunker = import "chunker-recursive.jsonnet"; "text-completion-bedrock", "-p", url.pulsar, - "-z", - $["aws-id-key"], - "-k", - $["aws-secret-key"], - "-r", - $["aws-region"], "-x", std.toString($["bedrock-max-output-tokens"]), "-t", @@ -37,41 +33,17 @@ local chunker = import "chunker-recursive.jsonnet"; "-m", $["bedrock-model"], ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - local service = - engine.internalService(containerSet) - .with_port(8000, 8000, "metrics"); - - engine.resources([ - containerSet, - service, - ]) - - }, - - "text-completion-rag" +: 
{ - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ "text-completion-bedrock", "-p", url.pulsar, - "-z", - $["aws-id-key"], - "-k", - $["aws-secret-key"], - "-r", - $["aws-region"], "-x", std.toString($["bedrock-max-output-tokens"]), "-t", @@ -83,24 +55,35 @@ local chunker = import "chunker-recursive.jsonnet"; "-o", "non-persistent://tg/response/text-completion-rag-response", ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ container ] + ); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] ); local service = engine.internalService(containerSet) .with_port(8000, 8000, "metrics"); + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8000, 8000, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, service, + serviceRag, ]) - - } + }, } + prompts + chunker diff --git a/templates/components/claude.jsonnet b/templates/components/claude.jsonnet index 0cd190d..b723a16 100644 --- a/templates/components/claude.jsonnet +++ b/templates/components/claude.jsonnet @@ -5,7 +5,6 @@ local prompts = import "prompts/mixtral.jsonnet"; { - "claude-key":: "${CLAUDE_KEY}", "claude-max-output-tokens":: 4096, "claude-temperature":: 0.0, @@ -13,6 +12,9 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) + local envSecrets = engine.envSecrets("claude-credentials") + .with_env_var("CLAUDE_KEY_TOKEN", "claude-key"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -20,44 +22,22 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-claude", "-p", url.pulsar, - "-k", - $["claude-key"], "-x", std.toString($["claude-max-output-tokens"]), "-t", std.toString($["claude-temperature"]), ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - local service = - engine.internalService(containerSet) - .with_port(8000, 8000, "metrics"); - - engine.resources([ - containerSet, - service, - ]) - - }, - - "text-completion-rag" +: { - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ "text-completion-claude", "-p", url.pulsar, - "-k", - $["claude-key"], "-x", std.toString($["claude-max-output-tokens"]), "-t", @@ -67,24 +47,35 @@ local prompts = import "prompts/mixtral.jsonnet"; "-o", "non-persistent://tg/response/text-completion-rag-response", ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ container ] + ); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] ); local service = engine.internalService(containerSet) .with_port(8000, 8000, "metrics"); + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8000, 8000, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, service, + serviceRag, ]) - - } + }, } + prompts diff --git a/templates/components/cohere.jsonnet b/templates/components/cohere.jsonnet index f05cb63..c2027f3 100644 --- 
a/templates/components/cohere.jsonnet +++ b/templates/components/cohere.jsonnet @@ -9,13 +9,15 @@ local prompts = import "prompts/mixtral.jsonnet"; "chunk-size":: 150, "chunk-overlap":: 10, - "cohere-key":: "${COHERE_KEY}", "cohere-temperature":: 0.0, "text-completion" +: { create:: function(engine) + local envSecrets = engine.envSecrets("cohere-credentials") + .with_env_var("COHERE_KEY", "cohere-key"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -23,42 +25,19 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-cohere", "-p", url.pulsar, - "-k", - $["cohere-key"], "-t", std.toString($["cohere-temperature"]), ]) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - local service = - engine.internalService(containerSet) - .with_port(8000, 8000, "metrics"); - - engine.resources([ - containerSet, - service, - ]) - - }, - - "text-completion-rag" +: { - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ "text-completion-cohere", "-p", url.pulsar, - "-k", - $["cohere-key"], "-t", std.toString($["cohere-temperature"]), "-i", @@ -70,20 +49,30 @@ local prompts = import "prompts/mixtral.jsonnet"; .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ container ] + ); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] ); local service = engine.internalService(containerSet) .with_port(8000, 8000, "metrics"); + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8000, 8000, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, service, + serviceRag, ]) - - } + }, } + prompts diff --git a/templates/components/llamafile.jsonnet b/templates/components/llamafile.jsonnet index 93163a1..d51cda6 100644 --- a/templates/components/llamafile.jsonnet +++ b/templates/components/llamafile.jsonnet @@ -6,12 +6,14 @@ local prompts = import "prompts/slm.jsonnet"; { "llamafile-model":: "LLaMA_CPP", - "llamafile-url":: "${LLAMAFILE_URL}", "text-completion" +: { create:: function(engine) + local envSecrets = engine.envSecrets("llamafile-credentials") + .with_env_var("LLAMAFILE_URL", "llamafile-url"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -21,27 +23,12 @@ local prompts = import "prompts/slm.jsonnet"; url.pulsar, "-m", $["llamafile-model"], - "-r", - $["llamafile-url"], ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - engine.resources([ - containerSet, - ]) - - }, - - "text-completion-rag" +: { - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ @@ -50,26 +37,40 @@ local prompts = import "prompts/slm.jsonnet"; url.pulsar, "-m", $["llamafile-model"], - "-r", - $["llamafile-url"], "-i", "non-persistent://tg/request/text-completion-rag", "-o", "non-persistent://tg/response/text-completion-rag-response", ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ 
container ] ); + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8080, 8080, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, + service, + serviceRag, ]) - - } + }, } + prompts diff --git a/templates/components/ollama.jsonnet b/templates/components/ollama.jsonnet index 5b8f55d..2ae696b 100644 --- a/templates/components/ollama.jsonnet +++ b/templates/components/ollama.jsonnet @@ -6,12 +6,14 @@ local prompts = import "prompts/mixtral.jsonnet"; { "ollama-model":: "gemma2:9b", - "ollama-url":: "${OLLAMA_HOST}", "text-completion" +: { create:: function(engine) + local envSecrets = engine.envSecrets("ollama-credentials") + .with_env_var("OLLAMA_HOST", "ollama-host"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -21,32 +23,12 @@ local prompts = import "prompts/mixtral.jsonnet"; url.pulsar, "-m", $["ollama-model"], - "-r", - $["ollama-url"], ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - local service = - engine.internalService(containerSet) - .with_port(8080, 8080, "metrics"); - - engine.resources([ - containerSet, - service, - ]) - - }, - - "text-completion-rag" +: { - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ @@ -55,31 +37,40 @@ local prompts = import "prompts/mixtral.jsonnet"; url.pulsar, "-m", $["ollama-model"], - "-r", - $["ollama-url"], "-i", "non-persistent://tg/request/text-completion-rag", "-o", "non-persistent://tg/response/text-completion-rag-response", ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ container ] + ); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] ); local service = engine.internalService(containerSet) .with_port(8080, 8080, "metrics"); + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8080, 8080, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, service, + serviceRag, ]) - - } + }, } + prompts diff --git a/templates/components/openai.jsonnet b/templates/components/openai.jsonnet index 3d1a2b7..7429042 100644 --- a/templates/components/openai.jsonnet +++ b/templates/components/openai.jsonnet @@ -5,7 +5,6 @@ local prompts = import "prompts/mixtral.jsonnet"; { - "openai-key":: "${OPENAI_KEY}", "openai-max-output-tokens":: 4096, "openai-temperature":: 0.0, "openai-model":: "GPT-3.5-Turbo", @@ -14,6 +13,9 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) + local envSecrets = engine.envSecrets("openai-credentials") + .with_env_var("OPENAI_TOKEN", "openai-token"); + local container = engine.container("text-completion") .with_image(images.trustgraph) @@ -21,8 +23,6 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-openai", "-p", url.pulsar, - "-k", - $["openai-key"], "-x", std.toString($["openai-max-output-tokens"]), "-t", @@ -30,37 +30,17 @@ local prompts = import "prompts/mixtral.jsonnet"; "-m", $["openai-model"], ]) + 
.with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); - local containerSet = engine.containers( - "text-completion", [ container ] - ); - - local service = - engine.internalService(containerSet) - .with_port(8080, 8080, "metrics"); - - engine.resources([ - containerSet, - service, - ]) - - }, - - "text-completion-rag" +: { - - create:: function(engine) - - local container = + local containerRag = engine.container("text-completion-rag") .with_image(images.trustgraph) .with_command([ "text-completion-openai", "-p", url.pulsar, - "-k", - $["openai-key"], "-x", std.toString($["openai-max-output-tokens"]), "-t", @@ -72,24 +52,35 @@ local prompts = import "prompts/mixtral.jsonnet"; "-o", "non-persistent://tg/response/text-completion-rag-response", ]) + .with_env_var_secrets(envSecrets) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( - "text-completion-rag", [ container ] + "text-completion", [ container ] + ); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] ); local service = engine.internalService(containerSet) .with_port(8080, 8080, "metrics"); + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8080, 8080, "metrics"); + engine.resources([ + envSecrets, containerSet, + containerSetRag, service, + serviceRag, ]) - - } + }, } + prompts diff --git a/templates/engine/docker-compose.jsonnet b/templates/engine/docker-compose.jsonnet index 4f837ff..c37f1df 100644 --- a/templates/engine/docker-compose.jsonnet +++ b/templates/engine/docker-compose.jsonnet @@ -18,12 +18,15 @@ reservations: {}, ports: [], volumes: [], + environment: {}, with_image:: function(x) self + { image: x }, with_command:: function(x) self + { command: x }, - with_environment:: function(x) self + { environment: x }, + with_environment:: function(x) self + { + environment: super.environment + x, + }, with_limits:: function(c, m) self + { limits: { cpus: c, memory: m } }, @@ -45,6 +48,16 @@ ] }, + with_env_var_secrets:: + function(vars) + std.foldl( + function(obj, x) obj.with_environment( + { [x]: "${" + x + "}" } + ), + vars.variables, + self + ), + add:: function() { services +: { [container.name]: { @@ -62,7 +75,7 @@ { command: container.command } else {}) + - (if std.objectHas(container, "environment") then + (if ! 
std.isEmpty(container.environment) then { environment: container.environment } else {}) + @@ -170,6 +183,27 @@ }, + envSecrets:: function(name) + { + + local volume = self, + + name: name, + + volid:: name, + + variables:: [], + + with_env_var:: + function(name, key) self + { + variables: super.variables + [name], + }, + + add:: function() { + } + + }, + containers:: function(name, containers) { diff --git a/templates/engine/k8s.jsonnet b/templates/engine/k8s.jsonnet index 69aabfd..2fec0d1 100644 --- a/templates/engine/k8s.jsonnet +++ b/templates/engine/k8s.jsonnet @@ -10,12 +10,20 @@ reservations: {}, ports: [], volumes: [], + environment: [], with_image:: function(x) self + { image: x }, with_command:: function(x) self + { command: x }, - with_environment:: function(x) self + { environment: x }, + with_environment:: function(x) self + { + environment: super.environment + [ + { + name: v.key, value: v.value + } + for v in std.objectKeysValues(x) + ], + }, with_limits:: function(c, m) self + { limits: { cpu: c, memory: m } }, @@ -37,6 +45,24 @@ ] }, + with_env_var_secrets:: + function(vars) + std.foldl( + function(obj, x) obj + { + environment: super.environment + [{ + name: x, + valueFrom: { + secretKeyRef: { + name: vars.name, + key: vars.keyMap[x], + } + } + }] + }, + vars.variables, + self + ), + add:: function() [ { @@ -97,16 +123,11 @@ (if std.objectHas(container, "command") then { command: container.command } else {}) + - (if std.objectHas(container, "environment") then - { env: [ { - name: e.key, value: e.value - } - for e in - std.objectKeysValues( - container.environment - ) - ] - } + + (if ! std.isEmpty(container.environment) then + { + env: container.environment, + } else {}) + (if std.length(container.volumes) > 0 then @@ -283,6 +304,34 @@ }, + envSecrets:: function(name) + { + + local volume = self, + + name: name, + + variables: [], + keyMap: {}, + + with_size:: function(size) self + { size: size }, + + add:: function() [ + ], + + volRef:: function() { + name: volume.name, + secret: { secretName: volume.name }, + }, + + with_env_var:: + function(name, key) self + { + variables: super.variables + [name], + keyMap: super.keyMap + { [name]: key }, + }, + + }, + containers:: function(name, containers) {