         unzip deploy.zip
         docker compose -f docker-compose.yaml up -d
     ... grafana and prometheus directories.

         chcon -Rt svirt_sandbox_file_t grafana prometheus
+        chmod 755 prometheus/ grafana/ grafana/*/
+        chmod 644 prometheus/* grafana/*/*
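The deploy.zip unpacked above is generated in the browser; later in this diff, PreparedConfig.tsx triggers the download by pointing a temporary anchor element at the blob URL held in the deployment store. A minimal sketch of that helper, assuming it is lifted out of the component (the body matches the download function added in PreparedConfig.tsx):

    // Trigger a browser download of the generated config zip.
    // configUrl is an object URL for the generated blob; the guard
    // mirrors the PR's check that a config has actually been generated.
    const download = (configUrl: string) => {
        if (!configUrl) return;
        const alink = document.createElement("a");
        alink.href = configUrl;
        alink.download = "deploy.zip";
        alink.click();
    };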
         export CLAUDE_KEY=TOKEN-GOES-HERE
     </>;
 } else if (model == "bedrock") {
     return <>
         export AWS_ID_KEY=ID-KEY-HERE
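Each model deployment expects its credentials in environment variables before the containers are launched. A hedged sketch of a pre-flight check — not part of this PR; the variable names are taken from the instruction text in this file, and the entries marked as assumed are guesses:

    // Illustrative only: verify required credentials before launch.
    const requiredVars: { [deployment: string]: string[] } = {
        claude:    ["CLAUDE_KEY"],
        bedrock:   ["AWS_ID_KEY"],     // a secret-key variable is assumed to exist too
        azure:     ["AZURE_ENDPOINT"], // the endpoint-token variable name is assumed
        cohere:    ["COHERE_KEY"],
        llamafile: ["LLAMAFILE_URL"],
        ollama:    ["OLLAMA_HOST"],
        openai:    ["OPENAI_KEY"],
    };

    // Returns the names of any required variables that are not set.
    const missing = (deployment: string): string[] =>
        (requiredVars[deployment] ?? []).filter((v) => !process.env[v]);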
@@ -36,11 +36,11 @@ const getInstructions = (model : string) => {
     </>;
 } else if (model == "azure") {
     return <>
+        To use Azure Serverless APIs, you need to have a serverless
+        endpoint deployed, and you must also provide an endpoint
+        token as an environment variable.
         export AZURE_ENDPOINT=https://ENDPOINT.API.HOST.GOES.HERE/
@@ -50,24 +50,24 @@ const getInstructions = (model : string) => {
     </>;
 } else if (model == "cohere") {
     return <>
-        To use Cohere APIs, you need an API token which must
-        be provided in an environment variable.
+        To use Cohere APIs, you need an API token which must
+        be provided in an environment variable.
         export COHERE_KEY=TOKEN-GOES-HERE
     </>;
 } else if (model == "llamafile") {
     return <>
-        To use Llamafile, you must have a Llamafile service running
+        To use Llamafile, you must have a Llamafile service running
         on an accessible host. The Llamafile host must be provided
         in an environment variable.
         export LLAMAFILE_URL=LLAMAFILE-URL
     </>;
 } else if (model == "ollama") {
     return <>
+        The power of Ollama is the flexibility it provides in
+        Language Model deployments. Being able to run LMs with
+        Ollama enables fully secure AI TrustGraph pipelines
@@ -77,37 +77,37 @@ const getInstructions = (model : string) => {
         models available using ollama pull.
         The Ollama service URL must be provided in an environment
         variable.
         export OLLAMA_HOST=http://ollama-host:11434
     </>;
 } else if (model == "openai") {
     return <>
-        Replace the URL with the URL of your Ollama service.
-        To use OpenAI APIs, you need an API token which must
-        be provided in an environment variable.
+        To use OpenAI APIs, you need an API token which must
+        be provided in an environment variable.
         export OPENAI_KEY=TOKEN-GOES-HERE
     </>;
 } else if (model == "vertexai") {
     return <>
-        To use VertexAI, you need to have a Google Cloud credential
+        To use VertexAI, you need to have a Google Cloud credential
         file provisioned for a service account which has access to
         the VertexAI services. This means signing up to GCP and
         using an existing, or launching a new, GCP project. The GCP
         credential will be a JSON file which should be stored in
         vertexai/private.json.
+The credential file is mounted as a volume in Docker Compose, which can cause issues with SELinux if you are running on Linux. Make sure that Docker has access to volume files if this affects you. - chcon -Rt svirt_sandbox_file_t vertexai/>; @@ -130,19 +130,20 @@ const DeploymentModel: React.FC= ({ return ( <> - - - - + {instructions} + + > ); diff --git a/src/simple-editor/deployment/PreparedConfig.tsx b/src/simple-editor/deployment/PreparedConfig.tsx new file mode 100644 index 0000000..d0658e0 --- /dev/null +++ b/src/simple-editor/deployment/PreparedConfig.tsx @@ -0,0 +1,74 @@ + +import { Plumbing } from '@mui/icons-material'; + +import { + Button, Typography, Alert, Paper, Box, Stack, +} from '@mui/material'; + +import { Check } from '@mui/icons-material'; + +import { useModelParamsStore } from '../state/ModelParams'; +import { useDeploymentStore } from '../state/Deployment'; + +const PreparedConfig = () => { + + const graphStore + = useModelParamsStore((state) => state.graphStore); + const vectorDB + = useModelParamsStore((state) => state.vectorDB); + const modelDeployment + = useModelParamsStore((state) => state.modelDeployment); + const modelName + = useModelParamsStore((state) => state.modelName); + + const configUrl + = useDeploymentStore((state) => state.configUrl); + + const download = () => { + + if (!configUrl) return; + let alink = document.createElement("a"); + alink.href = configUrl; + alink.download = "deploy.zip"; + alink.click(); + }; + + return ( + <> + +} - title="Model credentials" - /> - - -- {instructions} + + + + -+ + Model credentials ++ + + > + ); +} + +export default PreparedConfig; + diff --git a/src/simple-editor/generate-config.ts b/src/simple-editor/generate-config.ts index f7897a0..8d620f2 100644 --- a/src/simple-editor/generate-config.ts +++ b/src/simple-editor/generate-config.ts @@ -1,16 +1,22 @@ +import { ModelParams } from './state/ModelParams'; +import { Prompts } from './state/Prompts'; +import { + Options, DEFINITIONS_PROMPT, RELATIONSHIPS_PROMPT, TOPICS_PROMPT, + KNOWLEDGE_QUERY_PROMPT, DOCUMENT_QUERY_PROMPT, ROWS_PROMPT, +} from './state/Options'; + export const generateConfig = ( - graphStore : string, modelDeployment : string, vectorDB : string, - chunkSize : number, chunkOverlap : number, - maxOutputTokens : number, modelName : string, - chunkerType : string, temperature : number, + params : ModelParams, prompts : Prompts, options : Options, ) => { + const depl = params.modelDeployment; + let config = [ { - "name": graphStore, + "name": params.graphStore, "parameters": {} }, { @@ -18,7 +24,7 @@ export const generateConfig = "parameters": {} }, { - "name": vectorDB, + "name": params.vectorDB, "parameters": {} }, { @@ -38,38 +44,64 @@ export const generateConfig = "parameters": {} }, { - "name": modelDeployment, + "name": depl, + "parameters": {} + }, + { + "name": "prompt-template", "parameters": {} }, ]; - if (chunkerType == "chunker-recursive") { + // Will collate some various parameters to apply to the config. + // These get put into the 'null' pattern. 
+ + if (params.chunkerType == "chunker-recursive") { config.push({ "name": "override-recursive-chunker", - "parameters": { - "chunk-size": chunkSize, - "chunk-overlap": chunkOverlap, - } + "parameters": {} }); - } else { + } + + let parameters : { [k : string] : string | number } = {}; + + parameters["chunk-size"] = params.chunkSize; + parameters["chunk-overlap"] = params.chunkOverlap; + parameters[depl + "-temperature"] = params.temperature; + parameters[depl + "-max-output-tokens"] = params.maxOutputTokens; + parameters[depl + "-model"] = params.modelName; + + if (options.options.has(DEFINITIONS_PROMPT)) { + parameters["prompt-definition-template"] = prompts.definitions; + } + + if (options.options.has(RELATIONSHIPS_PROMPT)) { + parameters["prompt-relationship-template"] = prompts.relationships;; + } + + if (options.options.has(TOPICS_PROMPT)) { + parameters["prompt-topic-template"] = prompts.topics; + } + + if (options.options.has(KNOWLEDGE_QUERY_PROMPT)) { + parameters["prompt-knowledge-query-template"] = prompts.knowledgeQuery; + } + + if (options.options.has(DOCUMENT_QUERY_PROMPT)) { + parameters["prompt-document-query-template"] = prompts.documentQuery; + } + + if (options.options.has(ROWS_PROMPT)) { + parameters["prompt-rows-template"] = prompts.rows; + } + + if (params.chunkerType == "chunker-recursive") { config.push({ "name": "null", - "parameters": { - "chunk-size": chunkSize, - "chunk-overlap": chunkOverlap, - } + "parameters": parameters, }); } - config.push({ - name: "null", - parameters: { - [modelDeployment + "-temperature"]: temperature, - [modelDeployment + "-max-output-tokens"]: maxOutputTokens, - [modelDeployment + "-model"]: modelName, - } - }); - const cnf = JSON.stringify(config, null, 4) return fetch( diff --git a/src/simple-editor/model-params/Chunker.tsx b/src/simple-editor/model-params/Chunker.tsx new file mode 100644 index 0000000..cb169aa --- /dev/null +++ b/src/simple-editor/model-params/Chunker.tsx @@ -0,0 +1,137 @@ + +import React from 'react'; + +import { + FormControl, InputLabel, Select, MenuItem, Box, Stack, Divider, + Typography, TextField, +} from '@mui/material'; + +interface ChunkerProps { + type: string; + chunkSize: number; + chunkOverlap: number; + onTypeChange: (value: string) => void; + onChunkSizeChange: (value: number) => void; + onChunkOverlapChange: (value: number) => void; +} + +const Chunker: React.FC+ ++ ++ + Deployment configuration + ++
+- Model deployment: {modelDeployment}
+- Model name: {modelName}
+- Graph store: {graphStore}
+- Vector DB: {vectorDB}
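The generate-config.ts change above replaces the long positional parameter list with the ModelParams, Prompts and Options stores, and collates chunker, model and prompt settings into a single map that is applied to the 'null' pattern, keyed per deployment. A condensed sketch of that collation, with a simplified stand-in for the store type:

    // Simplified stand-in for the PR's ModelParams store type.
    interface ModelParams {
        modelDeployment: string;
        modelName: string;
        chunkSize: number;
        chunkOverlap: number;
        temperature: number;
        maxOutputTokens: number;
    }

    // Collate settings into one parameter map, keyed per deployment
    // as in the PR, e.g. "ollama-temperature", "ollama-model".
    const collate = (params: ModelParams) => {
        const depl = params.modelDeployment;
        const parameters: { [k: string]: string | number } = {};
        parameters["chunk-size"] = params.chunkSize;
        parameters["chunk-overlap"] = params.chunkOverlap;
        parameters[depl + "-temperature"] = params.temperature;
        parameters[depl + "-max-output-tokens"] = params.maxOutputTokens;
        parameters[depl + "-model"] = params.modelName;
        return parameters;
    };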
+} + sx={{ mt: 1, mb: 2}} + severity="success" + > + Configuration generation was successful + + + = ({ + type, + chunkSize, + chunkOverlap, + onTypeChange, + onChunkSizeChange, + onChunkOverlapChange, +}) => { + return ( + + + + ++ + ); +}; + +export default Chunker; + diff --git a/src/simple-editor/model-params/GraphStore.tsx b/src/simple-editor/model-params/GraphStore.tsx new file mode 100644 index 0000000..43a43ac --- /dev/null +++ b/src/simple-editor/model-params/GraphStore.tsx @@ -0,0 +1,107 @@ +import React from 'react'; + +import { + FormControl, InputLabel, Select, MenuItem, Box, Stack, Divider, + Typography, +} from '@mui/material'; + +interface GraphStoreProps { + value: string; + onChange: (value: string) => void; +} + +const GraphStore: React.FC+ + + + +Chunker Type + + +onChunkSizeChange(parseInt(e.target.value))} + margin="normal" + /> + onChunkOverlapChange(parseInt(e.target.value))} + margin="normal" + /> + + = ({ value, onChange }) => { + + return ( + <> + + + + + + > + + ); +}; + +export default GraphStore; + diff --git a/src/simple-editor/model-params/ModelDeployment.tsx b/src/simple-editor/model-params/ModelDeployment.tsx new file mode 100644 index 0000000..ddd0e4d --- /dev/null +++ b/src/simple-editor/model-params/ModelDeployment.tsx @@ -0,0 +1,288 @@ +import React from 'react'; + +import { + FormControl, InputLabel, Select, MenuItem, Box, Stack, Divider, + Typography, +} from '@mui/material'; + +interface ModelDeploymentProps { + value : string; + onChange: (value: string) => void; +} + +const ModelDeployment: React.FCGraph Store + + += ({ + value, onChange +}) => { + + return ( + + + + + + ); +}; + +export default ModelDeployment; + diff --git a/src/simple-editor/model-params/ModelParameters.tsx b/src/simple-editor/model-params/ModelParameters.tsx new file mode 100644 index 0000000..3b34152 --- /dev/null +++ b/src/simple-editor/model-params/ModelParameters.tsx @@ -0,0 +1,121 @@ +import React from 'react'; +import { + FormControl, InputLabel, Select, MenuItem, TextField, Slider +} from '@mui/material'; + +import modelsRaw from './models.json'; +const models = modelsRaw as { [ix : string ] : string[] }; + +interface ModelParametersProps { + modelName: string; + temperature: number; + maxOutputTokens: number; + onModelNameChange: (value: string) => void; + onTemperatureChange: (value: number) => void; + onMaxOutputTokensChange: (value: number) => void; + modelDeployment : string; +} + +const ModelParameters: React.FCModel deployment + + += ({ + modelName, + temperature, + modelDeployment, + maxOutputTokens, + onModelNameChange, + onTemperatureChange, + onMaxOutputTokensChange, +}) => { + + const availModels = models[modelDeployment]; + + const ModelList : React.FC<{ + modelName : string; + availModels : string[]; + onModelNameChange : (value: string) => void; + }> = ({ modelName, availModels, onModelNameChange}) => { + + const readOnly = (availModels.length == 0); + + if (availModels.length == 0) { + return ( + + + + ); + + } + + return ( +Model + + ++ + + + ); + } + + return ( +Model + + + ++ ++ ); +}; + +export default ModelParameters; + diff --git a/src/simple-editor/model-params/ParamsForm.tsx b/src/simple-editor/model-params/ParamsForm.tsx new file mode 100644 index 0000000..78ff5e7 --- /dev/null +++ b/src/simple-editor/model-params/ParamsForm.tsx @@ -0,0 +1,151 @@ + +import React from 'react'; + +import { Box } from '@mui/material'; + +import GraphStore from './GraphStore'; +import VectorDB from './VectorDB'; +import Chunker from './Chunker'; +import 
ModelDeployment from './ModelDeployment'; +import ModelParameters from './ModelParameters'; + +import { useModelParamsStore } from '../state/ModelParams'; +import { useDeploymentStore } from '../state/Deployment'; + +import modelsRaw from './models.json'; +const models = modelsRaw as { [ix : string ] : string[] }; + +interface ParamsFormProps { +} + +const ParamsForm: React.FC+ + ++Temperature: {temperature}
+onTemperatureChange(value as number) + } + min={0} + max={1} + step={0.1} + /> + onMaxOutputTokensChange(parseInt(e.target.value)) + } + margin="normal" + /> + = ({ +}) => { + + const setConfigUrl = + useDeploymentStore((state) => state.setConfigUrl); + + useModelParamsStore.subscribe(() => { + setConfigUrl(""); + }); + + const graphStore + = useModelParamsStore((state) => state.graphStore); + + const vectorDB + = useModelParamsStore((state) => state.vectorDB); + + const chunkerType + = useModelParamsStore((state) => state.chunkerType); + + const chunkSize + = useModelParamsStore((state) => state.chunkSize); + + const chunkOverlap + = useModelParamsStore((state) => state.chunkOverlap); + + const modelDeployment + = useModelParamsStore((state) => state.modelDeployment); + + const modelName + = useModelParamsStore((state) => state.modelName); + + const temperature + = useModelParamsStore((state) => state.temperature); + + const maxOutputTokens + = useModelParamsStore((state) => state.maxOutputTokens); + + const setGraphStore + = useModelParamsStore((state) => state.setGraphStore); + + const setVectorDB + = useModelParamsStore((state) => state.setVectorDB); + + const setChunkerType + = useModelParamsStore((state) => state.setChunkerType); + + const setChunkSize + = useModelParamsStore((state) => state.setChunkSize); + + const setChunkOverlap + = useModelParamsStore((state) => state.setChunkOverlap); + + const setModelDeployment + = useModelParamsStore((state) => state.setModelDeployment); + + const setModelName + = useModelParamsStore((state) => state.setModelName); + + const setTemperature + = useModelParamsStore((state) => state.setTemperature); + + const setMaxOutputTokens + = useModelParamsStore((state) => state.setMaxOutputTokens); + + useModelParamsStore.subscribe( + (n, o) => { + + if (n.modelDeployment == o.modelDeployment) return; + + if (n.modelName in models[n.modelDeployment]) return; + + if (models[n.modelDeployment].length == 0) + setModelName(""); + else + setModelName(models[n.modelDeployment][0]); + + } + ); + + return ( + + <> + + + + + + > + + ); +}; + +export default ParamsForm; + diff --git a/src/simple-editor/model-params/VectorDB.tsx b/src/simple-editor/model-params/VectorDB.tsx new file mode 100644 index 0000000..d2ecfc3 --- /dev/null +++ b/src/simple-editor/model-params/VectorDB.tsx @@ -0,0 +1,97 @@ +import React from 'react'; + +import { + FormControl, InputLabel, Select, MenuItem, Box, Stack, Divider, + Typography, +} from '@mui/material'; + +interface VectorDBProps { + value: string; + onChange: (value: string) => void; +} + +const VectorDB: React.FC+ + ++ + + ++ + + ++ + + ++ + + ++ = ({ value, onChange }) => { + return ( + + + + + + ); +}; + +export default VectorDB; \ No newline at end of file diff --git a/src/simple-editor/model-params/models.json b/src/simple-editor/model-params/models.json new file mode 100644 index 0000000..f16f94c --- /dev/null +++ b/src/simple-editor/model-params/models.json @@ -0,0 +1,102 @@ +{ + "claude": [ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307" + ], + "bedrock": [ + "anthropic.claude-3-haiku-20240307-v1:0", + "anthropic.claude-3-sonnet-20240229-v1:0", + "anthropic.claude-3-opus-20240229-v1:0", + "anthropic.claude-3-5-sonnet-20240620-v1:0", + "cohere.command-r-plus-v1:0", + "cohere.command-text-v14", + "cohere.command-light-text-v14", + "cohere.command-r-v1:0", + "meta.llama3-1-405b-instruct-v1:0", + "meta.llama3-1-70b-instruct-v1:0", + 
"meta.llama3-1-8b-instruct-v1:0", + "mistral.mixtral-8x7b-instruct-v0:1", + "mistral.mistral-large-2407-v1:0", + "mistral.mistral-7b-instruct-v0:2", + "ai21.jamba-instruct-v1:0", + "ai21.jamba-1-5-large-v1:0", + "ai21.jamba-1-5-mini-v1:0" + ], + "cohere": [ + "command-r-08-2024", + "command-r-plus-08-2024", + "command-r-plus", + "command-r", + "c4ai-aya-23-35b", + "c4ai-aya-23-8b", + "command", + "command-light", + "command-nightly", + "command-light-nightly" + ], + "googleaistudio": [ + "gemini-1.5-flash-002", + "gemini-1.5-flash-001", + "gemini-1.5-flash-8b-exp-0924", + "gemini-1.5-flash-8b-exp-0827", + "gemini-1.5-flash-exp-0827", + "gemini-1.5-flash-8b-001", + "gemini-1.5-flash-8b", + "gemini-1.5-pro-001", + "gemini-1.5-pro-002", + "gemini-1.5-pro-exp-0827", + "gemini-1.0-pro-latest", + "gemini-1.0-pro-001" + ], + "ollama": [ + "llama3.1:405b", + "llama3.1:70b", + "llama3.1:8b", + "gemma2:2b", + "gemma2:9b", + "gemma2:27b", + "qwen2.5:0.5b", + "qwen2.5:1.5b", + "qwen2.5:3b", + "qwen2.5:7b", + "qwen2.5:14b", + "qwen2.5:32b", + "qwen2.5:72b", + "phi3.5:3.8b", + "mistral-small:22b", + "mistral-nemo:12b", + "mistral:7b", + "mixtral:8x7b", + "mixtral:8x22b", + "command-r:35b", + "command-r-plus:104b" + ], + "openai": [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12" + ], + "vertexai": [ + "gemini-1.5-flash-002", + "gemini-1.5-pro-002", + "gemini-1.5-flash-001", + "gemini-1.5-pro-001", + "gemini-1.0-pro-002", + "gemini-1.0-pro-001", + "gemini-flash-experimental", + "gemini-pro-experimental", + "gemini-experimental" + ], + "llamafile": [ + ], + "azure": [ + ] +} diff --git a/src/simple-editor/options/Option.tsx b/src/simple-editor/options/Option.tsx new file mode 100644 index 0000000..2d12cf9 --- /dev/null +++ b/src/simple-editor/options/Option.tsx @@ -0,0 +1,89 @@ + +import { Card, CardHeader, CardContent, CardActionArea } from '@mui/material'; +import { Typography } from '@mui/material'; +import { blue } from '@mui/material/colors'; + +const Option = ({enabled, onChange, avatar, title, content} : { + enabled : boolean; + onChange : () => void, + avatar : any; + title : string; + content : any; +}) => { + + const Header = () => { + + if (enabled) { + return ( +Vector DB + + ++ active + + } + /> + ); + } else { + return ( + + available + + } + /> + ); + } + + } + + const Content = () => { + + if (enabled) { + return ( + + + ); + } else { + return ( ++ {content} + ++ + ); + } + + } + + return ( ++ {content} + ++ + ); +}; + +export default Option; + diff --git a/src/simple-editor/options/Options.tsx b/src/simple-editor/options/Options.tsx new file mode 100644 index 0000000..aff8f10 --- /dev/null +++ b/src/simple-editor/options/Options.tsx @@ -0,0 +1,163 @@ + + +import { Stack } from '@mui/material'; +import { + Psychology, +// Spoke, +// Plumbing, +// Engineering, +// Hub, +// ChatBubble, +// VerticalSplit, +// MonitorHeart, +// Polyline, +} from '@mui/icons-material'; +import { useDeploymentStore } from '../state/Deployment'; + +import { + useOptionsStore, DEFINITIONS_PROMPT, RELATIONSHIPS_PROMPT, + TOPICS_PROMPT, KNOWLEDGE_QUERY_PROMPT, DOCUMENT_QUERY_PROMPT, + ROWS_PROMPT, +} from '../state/Options'; + +import Option from './Option'; + +const ParamsForm: React.FC = ({ +}) => { + + const options = useOptionsStore((state) => state.options); + + const setOptions = useOptionsStore((state) => state.setOptions); + + const setConfigUrl = + useDeploymentStore((state) 
=> state.setConfigUrl); + + useOptionsStore.subscribe(() => { + setConfigUrl(""); + }); + + const definitions = options.has(DEFINITIONS_PROMPT); + const relationships = options.has(RELATIONSHIPS_PROMPT); + const topics = options.has(TOPICS_PROMPT); + const kgQuery = options.has(KNOWLEDGE_QUERY_PROMPT); + const docQuery = options.has(DOCUMENT_QUERY_PROMPT); + const rows = options.has(ROWS_PROMPT); + + const set = (o : string, value : boolean) => { + if (value) { + const opts = new Set(options); + opts.add(o); + setOptions(opts); + } else { + const opts = new Set(options); + opts.delete(o); + setOptions(opts); + } + } + + const onDefinitions = () => { + set(DEFINITIONS_PROMPT, !definitions); + }; + + const onRelationships = () => { + set(RELATIONSHIPS_PROMPT, !relationships); + }; + + const onTopics = () => { + set(TOPICS_PROMPT, !topics); + }; + + const onKgQuery = () => { + set(KNOWLEDGE_QUERY_PROMPT, !kgQuery); + }; + + const onDocQuery = () => { + set(DOCUMENT_QUERY_PROMPT, !docQuery); + }; + + const onRows = () => { + set(ROWS_PROMPT, !rows); + }; + + return ( + <> + +onChange()}> + ++ + + + } + title="Definitions prompt" + content={ + 'Tailor the default definitions-extraction prompt' + } + + /> + + } + title="Relationships prompt" + content={ + 'Tailor the default relationships-extraction prompt' + } + + /> + + } + title="Topics prompt" + content={ + 'Tailor the default topics-extraction prompt' + } + + /> + + } + title="Knowledge graph prompt" + content={ + 'Tailor the default knowledge-graph query prompt' + } + + /> + + } + title="Document prompt" + content={ + 'Tailor the default document query prompt' + } + + /> + + } + title="Row extraction prompt" + content={ + 'Tailor the default row extraction prompt' + } + + /> + + + + > + + ); +}; + +export default ParamsForm; + diff --git a/src/simple-editor/options/Prompt.tsx b/src/simple-editor/options/Prompt.tsx new file mode 100644 index 0000000..efcd2a9 --- /dev/null +++ b/src/simple-editor/options/Prompt.tsx @@ -0,0 +1,36 @@ + +import { + Box, TextField, +} from '@mui/material'; + +interface PromptProps { + value : string; + onChange : (value : string) => void; +} + +const Prompt : React.FC= ({value, onChange}) => { + + return ( + <> + + + + + > + ); +} + +export default Prompt; + diff --git a/src/simple-editor/prompts.ts b/src/simple-editor/prompts.ts new file mode 100644 index 0000000..877cb95 --- /dev/null +++ b/src/simple-editor/prompts.ts @@ -0,0 +1,14 @@ + + +export const default_definition_prompt = ") => { + onChange(event.target.value); + } + } + /> + \nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n \n\n\n{text}\n \n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n "; + +export const default_relationship_prompt = "\nStudy the following text and derive entity relationships. 
For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n \n\n\n{text}\n \n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n "; + +export const default_topics_prompt = "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations."; + +export const default_knowledge_query_prompt = "Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements.\n\nHere's the knowledge statements:\n{graph}\n\nUse only the provided knowledge statements to respond to the following:\n{query}\n"; + +export const default_document_query_prompt = "Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements.\n\nHere is the context:\n{documents}\n\nUse only the provided knowledge statements to respond to the following:\n{query}\n"; + +export const default_rows_prompt = "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n \n\n\n{schema}\n \n\n\n{text}\n \n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n "; + diff --git a/src/simple-editor/state/Deployment.ts b/src/simple-editor/state/Deployment.ts new file mode 100644 index 0000000..1761f1f --- /dev/null +++ b/src/simple-editor/state/Deployment.ts @@ -0,0 +1,30 @@ + +import { create } from 'zustand' + +interface Deployment { + + deploymentConfig : string; + configUrl : string; + + setDeploymentConfig : (v : string) => void; + setConfigUrl : (v : string) => void; + +} + +export const useDeploymentStore = create()( + (set) => ({ + + deploymentConfig: "", + configUrl: "", + + setDeploymentConfig: (v) => set(() => ({ + deploymentConfig: v + })), + + setConfigUrl: (v) => set(() => ({ + configUrl: v + })), + + }) +); + diff --git a/src/simple-editor/state/ModelParams.ts b/src/simple-editor/state/ModelParams.ts index f53a9b5..6a5f786 100644 --- a/src/simple-editor/state/ModelParams.ts +++ b/src/simple-editor/state/ModelParams.ts @@ -1,7 +1,8 @@ import { create } from 'zustand' -interface ModelParams { +export interface ModelParams { + graphStore : string; vectorDB : string; chunkerType: string; @@ -11,8 +12,6 @@ interface ModelParams { modelName : string; temperature : number; maxOutputTokens : number; - deploymentConfig : string; - configUrl : string; setGraphStore : (v : string) => void; setVectorDB : (v : string) => void; @@ -23,8 +22,6 @@ interface ModelParams { setModelName : (v : string) => void; setTemperature : (v : number) => void; setMaxOutputTokens : (v : number) => void; - setDeploymentConfig : (v : string) => void; - setConfigUrl : (v : string) => void; } @@ -40,69 +37,41 @@ export const useModelParamsStore = create ()( modelName: "gemma2:9b", temperature: 0.3, maxOutputTokens: 1000, - deploymentConfig: "", - configUrl: "", setGraphStore: (v) => set(() => ({ graphStore: v, - configUrl: "", - deploymentConfig: "", })), setVectorDB: (v) => set(() => ({ vectorDB: v, - configUrl: "", - deploymentConfig: "", })), setChunkerType: (v) => set(() => ({ chunkerType: v, - configUrl: "", - deploymentConfig: "", })), setChunkSize: (v) => set(() => ({ chunkSize: v, - configUrl: "", - deploymentConfig: "", })), setChunkOverlap: (v) => set(() => ({ chunkOverlap: v, - configUrl: "", - deploymentConfig: "", })), setModelDeployment: (v) => set(() => ({ modelDeployment: v, - configUrl: "", - deploymentConfig: "", })), setModelName: (v) => set(() => ({ modelName: v, - configUrl: "", - deploymentConfig: "", })), setTemperature: (v) => set(() => ({ temperature: v, - configUrl: "", - deploymentConfig: "", })), setMaxOutputTokens: (v) => set(() => ({ maxOutputTokens: v, - configUrl: "", - deploymentConfig: "", - })), - - setDeploymentConfig: (v) => set(() => ({ - deploymentConfig: v - })), - - setConfigUrl: (v) => set(() => ({ - configUrl: v })), }) diff --git a/src/simple-editor/state/Options.ts b/src/simple-editor/state/Options.ts new file mode 100644 index 0000000..2095de7 --- /dev/null +++ b/src/simple-editor/state/Options.ts @@ -0,0 +1,28 @@ + +import { create } from 'zustand' + +export const DEFINITIONS_PROMPT = "definitions-prompt"; +export const RELATIONSHIPS_PROMPT = "relationships-prompt"; +export const TOPICS_PROMPT = "topics-prompt"; +export const KNOWLEDGE_QUERY_PROMPT = "knowledge-query-prompt"; +export const DOCUMENT_QUERY_PROMPT = "document-query-prompt"; +export const ROWS_PROMPT = "rows-prompt"; + +export interface Options { + + options : Set ; + setOptions : (v: Set ) => void; +} + +export const useOptionsStore = create ()( + (set) => ({ + + options: new Set (""), + + setOptions: 
(v) => set(() => ({ + options: v + })), + + }) +); + diff --git a/src/simple-editor/state/Prompts.ts b/src/simple-editor/state/Prompts.ts new file mode 100644 index 0000000..b53880b --- /dev/null +++ b/src/simple-editor/state/Prompts.ts @@ -0,0 +1,61 @@ + +import { create } from 'zustand' + +import * as prompts from '../prompts'; + +export interface Prompts { + + definitions : string; + relationships : string; + topics : string; + knowledgeQuery : string; + documentQuery : string; + rows : string; + + setDefinitions : (v : string) => void; + setRelationships : (v : string) => void; + setTopics : (v : string) => void; + setKnowledgeQuery : (v : string) => void; + setDocumentQuery : (v : string) => void; + setRows : (v : string) => void; + +} + +export const usePromptsStore = create ()( + + (set) => ({ + + definitions: prompts.default_definition_prompt, + relationships: prompts.default_relationship_prompt, + topics: prompts.default_topics_prompt, + knowledgeQuery: prompts.default_knowledge_query_prompt, + documentQuery: prompts.default_document_query_prompt, + rows: prompts.default_rows_prompt, + + setDefinitions: (v) => set(() => ({ + definitions: v + })), + + setRelationships: (v) => set(() => ({ + relationships: v + })), + + setTopics: (v) => set(() => ({ + topics: v + })), + + setKnowledgeQuery: (v) => set(() => ({ + knowledgeQuery: v + })), + + setDocumentQuery: (v) => set(() => ({ + documentQuery: v + })), + + setRows: (v) => set(() => ({ + rows: v + })), + + }) +); + diff --git a/templates/all-patterns.jsonnet b/templates/all-patterns.jsonnet index 29384d1..f3fb19b 100644 --- a/templates/all-patterns.jsonnet +++ b/templates/all-patterns.jsonnet @@ -7,9 +7,11 @@ import "patterns/triple-store-neo4j.jsonnet", import "patterns/graph-rag.jsonnet", import "patterns/llm-azure.jsonnet", + import "patterns/llm-azure-openai.jsonnet", import "patterns/llm-bedrock.jsonnet", import "patterns/llm-claude.jsonnet", import "patterns/llm-cohere.jsonnet", + import "patterns/llm-googleaistudio.jsonnet", import "patterns/llm-llamafile.jsonnet", import "patterns/llm-ollama.jsonnet", import "patterns/llm-openai.jsonnet", diff --git a/templates/components.jsonnet b/templates/components.jsonnet index 8ed2da0..f27be5c 100644 --- a/templates/components.jsonnet +++ b/templates/components.jsonnet @@ -1,11 +1,13 @@ { "azure": import "components/azure.jsonnet", + "azure-openai": import "components/azure-openai.jsonnet", "bedrock": import "components/bedrock.jsonnet", "claude": import "components/claude.jsonnet", "cohere": import "components/cohere.jsonnet", "document-rag": import "components/document-rag.jsonnet", "embeddings-hf": import "components/embeddings-hf.jsonnet", "embeddings-ollama": import "components/embeddings-ollama.jsonnet", + "googleaistudio": import "components/googleaistudio.jsonnet", "grafana": import "components/grafana.jsonnet", "graph-rag": import "components/graph-rag.jsonnet", "triple-store-cassandra": import "components/cassandra.jsonnet", @@ -19,6 +21,8 @@ "prompt-template-kq-query": import "components/null.jsonnet", "prompt-template-relationships": import "components/null.jsonnet", "prompt-template-rows-template": import "components/null.jsonnet", + "prompt-generic": import "components/prompt-generic.jsonnet", + "prompt-template": import "components/prompt-template.jsonnet", "pulsar": import "components/pulsar.jsonnet", "pulsar-manager": import "components/pulsar-manager.jsonnet", "trustgraph-base": import "components/trustgraph.jsonnet", diff --git 
a/templates/components/azure-openai.jsonnet b/templates/components/azure-openai.jsonnet new file mode 100644 index 0000000..31e1972 --- /dev/null +++ b/templates/components/azure-openai.jsonnet @@ -0,0 +1,95 @@ +local base = import "base/base.jsonnet"; +local images = import "values/images.jsonnet"; +local url = import "values/url.jsonnet"; +local prompts = import "prompts/mixtral.jsonnet"; + +{ + + "azure-openai-token":: "${AZURE_OPENAI_TOKEN}", + "azure-openai-model":: "GPT-3.5-Turbo", + "azure-openai-max-output-tokens":: 4192, + "azure-openai-temperature":: 0.0, + + "text-completion" +: { + + create:: function(engine) + + local container = + engine.container("text-completion") + .with_image(images.trustgraph) + .with_command([ + "text-completion-azure-openai", + "-p", + url.pulsar, + "-k", + $["azure-openai-token"], + "-m", + $["azure-openai-model"], + "-x", + std.toString($["azure-openai-max-output-tokens"]), + "-t", + std.toString($["azure-openai-temperature"]), + ]) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "text-completion", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + + engine.resources([ + containerSet, + service, + ]) + + }, + + "text-completion-rag" +: { + + create:: function(engine) + + local container = + engine.container("text-completion-rag") + .with_image(images.trustgraph) + .with_command([ + "text-completion-azure", + "-p", + url.pulsar, + "-k", + $["azure-openai-token"], + "-e", + $["azure-openai-model"], + "-x", + std.toString($["azure-openai-max-output-tokens"]), + "-t", + std.toString($["azure-openai-temperature"]), + "-i", + "non-persistent://tg/request/text-completion-rag", + "-o", + "non-persistent://tg/response/text-completion-rag-response", + ]) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "text-completion-rag", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + + engine.resources([ + containerSet, + service, + ]) + + + } + +} + prompts + diff --git a/templates/components/googleaistudio.jsonnet b/templates/components/googleaistudio.jsonnet new file mode 100644 index 0000000..9f9d48d --- /dev/null +++ b/templates/components/googleaistudio.jsonnet @@ -0,0 +1,90 @@ +local base = import "base/base.jsonnet"; +local images = import "values/images.jsonnet"; +local url = import "values/url.jsonnet"; +local prompts = import "prompts/mixtral.jsonnet"; + +{ + + "googleaistudio-key":: "${GOOGLEAISTUDIO_KEY}", + "googleaistudio-max-output-tokens":: 4096, + "googleaistudio-temperature":: 0.0, + + "text-completion" +: { + + create:: function(engine) + + local container = + engine.container("text-completion") + .with_image(images.trustgraph) + .with_command([ + "text-completion-googleaistudio", + "-p", + url.pulsar, + "-k", + $["googleaistudio-key"], + "-x", + std.toString($["googleaistudio-max-output-tokens"]), + "-t", + std.toString($["googleaistudio-temperature"]), + ]) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "text-completion", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + + engine.resources([ + containerSet, + service, + ]) + + }, + + "text-completion-rag" +: { + + create:: function(engine) + + local container = + engine.container("text-completion-rag") + 
.with_image(images.trustgraph) + .with_command([ + "text-completion-googleaistudio", + "-p", + url.pulsar, + "-k", + $["googleaistudio-key"], + "-x", + std.toString($["googleaistudio-max-output-tokens"]), + "-t", + std.toString($["googleaistudio-temperature"]), + "-i", + "non-persistent://tg/request/text-completion-rag", + "-o", + "non-persistent://tg/response/text-completion-rag-response", + ]) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "text-completion-rag", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + + engine.resources([ + containerSet, + service, + ]) + + + } + +} + prompts + diff --git a/templates/components/ollama.jsonnet b/templates/components/ollama.jsonnet index b0507ce..5b8f55d 100644 --- a/templates/components/ollama.jsonnet +++ b/templates/components/ollama.jsonnet @@ -1,7 +1,7 @@ local base = import "base/base.jsonnet"; local images = import "values/images.jsonnet"; local url = import "values/url.jsonnet"; -local prompts = import "prompts/slm.jsonnet"; +local prompts = import "prompts/mixtral.jsonnet"; { diff --git a/templates/generate-all b/templates/generate-all index 948b811..0b40362 100755 --- a/templates/generate-all +++ b/templates/generate-all @@ -122,8 +122,8 @@ def generate_all(output, version): "docker-compose", "minikube-k8s", "gcp-k8s" ]: for model in [ - "azure", "bedrock", "claude", "cohere", "llamafile", "ollama", - "openai", "vertexai" + "azure", "azure-openai", "bedrock", "claude", "cohere", + "googleaistudio", "llamafile", "ollama", "openai", "vertexai", ]: for graph in [ "cassandra", "neo4j" ]: diff --git a/templates/patterns/llm-azure-openai.jsonnet b/templates/patterns/llm-azure-openai.jsonnet new file mode 100644 index 0000000..06e1a3f --- /dev/null +++ b/templates/patterns/llm-azure-openai.jsonnet @@ -0,0 +1,32 @@ +{ + pattern: { + name: "azure-openai", + icon: "🤖💬", + title: "Add Azure OpenAI LLM endpoint for text completion", + description: "This pattern integrates an Azure OpenAI LLM endpoint hosted in the Azure cloud for text completion operations. You need an Azure subscription to be able to use this service.", + requires: ["pulsar", "trustgraph"], + features: ["llm"], + args: [ + { + name: "azure-openai-max-output-tokens", + label: "Maximum output tokens", + type: "integer", + description: "Limit on number tokens to generate", + default: 4096, + required: true, + }, + { + name: "azure-openai-temperature", + label: "Temperature", + type: "slider", + description: "Controlling predictability / creativity balance", + min: 0, + max: 1, + step: 0.05, + default: 0.5, + }, + ], + category: [ "llm" ], + }, + module: "components/azure.jsonnet", +} diff --git a/templates/patterns/llm-googleaistudio.jsonnet b/templates/patterns/llm-googleaistudio.jsonnet new file mode 100644 index 0000000..aa56d34 --- /dev/null +++ b/templates/patterns/llm-googleaistudio.jsonnet @@ -0,0 +1,32 @@ +{ + pattern: { + name: "googleaistudio", + icon: "🤖💬", + title: "Add GoogleAIStudio for text completion", + description: "This pattern integrates a GoogleAIStudio LLM service for text completion operations. 
You need a GoogleAIStudio API key to be able to use this service.",
+        requires: ["pulsar", "trustgraph"],
+        features: ["llm"],
+        args: [
+            {
+                name: "googleaistudio-max-output-tokens",
+                label: "Maximum output tokens",
+                type: "integer",
+                description: "Limit on number tokens to generate",
+                default: 4096,
+                required: true,
+            },
+            {
+                name: "googleaistudio-temperature",
+                label: "Temperature",
+                type: "slider",
+                description: "Controlling predictability / creativity balance",
+                min: 0,
+                max: 1,
+                step: 0.05,
+                default: 0.5,
+            },
+        ],
+        category: [ "llm" ],
+    },
+    module: "components/googleaistudio.jsonnet",
+}
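A design choice that recurs throughout this PR: the generated config URL lives in its own zustand store, and the forms subscribe to the parameter and options stores so that any edit clears it — a stale deploy.zip can never be downloaded. A minimal self-contained sketch of the pattern, with store shapes simplified from Deployment.ts and ModelParams.ts:

    import { create } from 'zustand'

    interface Deployment {
        configUrl: string;
        setConfigUrl: (v: string) => void;
    }

    const useDeploymentStore = create<Deployment>()((set) => ({
        configUrl: "",
        setConfigUrl: (v) => set(() => ({ configUrl: v })),
    }));

    interface Params {
        temperature: number;
        setTemperature: (v: number) => void;
    }

    const useParamsStore = create<Params>()((set) => ({
        temperature: 0.3,
        setTemperature: (v) => set(() => ({ temperature: v })),
    }));

    // Any parameter change invalidates the previously generated config,
    // so the download button can only ever serve an up-to-date zip.
    useParamsStore.subscribe(() => {
        useDeploymentStore.getState().setConfigUrl("");
    });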