# pyproject.toml
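
# Project metadata (PEP 621): package name, description, classifiers, and runtime dependencies.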
[project]
name = "openllm"
description = "OpenLLM: Self-hosting LLMs Made Easy."
readme = { file = "README.md", content-type = "text/markdown" }
authors = [{ name = "BentoML Team", email = "[email protected]" }]
dynamic = ["version"]
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Environment :: GPU :: NVIDIA CUDA",
  "Environment :: GPU :: NVIDIA CUDA :: 12",
  "Environment :: GPU :: NVIDIA CUDA :: 11.8",
  "Environment :: GPU :: NVIDIA CUDA :: 11.7",
  "License :: OSI Approved :: Apache Software License",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Topic :: Software Development :: Libraries",
  "Operating System :: OS Independent",
  "Intended Audience :: Developers",
  "Intended Audience :: Science/Research",
  "Intended Audience :: System Administrators",
  "Typing :: Typed",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = [
  "bentoml",
  "typer",
  "questionary",
  "pyaml",
  "psutil",
  "pathlib",
  "pip_requirements_parser",
  "nvidia-ml-py",
  "dulwich",
  "tabulate",
  "uv",
  "openai==1.54.4",
]
keywords = [
  "MLOps",
  "AI",
  "BentoML",
  "Model Serving",
  "Model Deployment",
  "LLMOps",
  "Falcon",
  "Vicuna",
  "Llama 2",
  "Fine tuning",
  "Serverless",
  "Large Language Model",
  "Generative AI",
  "StableLM",
  "Alpaca",
  "PyTorch",
  "Mistral",
  "vLLM",
  "Transformers",
]
license = "Apache-2.0"
requires-python = ">=3.9"
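
# Console script: installing the package provides an `openllm` command that runs
# the Typer app defined in src/openllm/__main__.py.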
[project.scripts]
openllm = "openllm.__main__:app"
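
# Project links shown on the package index page.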
[project.urls]
Blog = "https://modelserving.com"
Documentation = "https://github.com/bentoml/OpenLLM#readme"
GitHub = "https://github.com/bentoml/OpenLLM"
Homepage = "https://bentoml.com"
Tracker = "https://github.com/bentoml/OpenLLM/issues"
Twitter = "https://twitter.com/bentomlai"
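
# Tool-specific settings for the Typer-based CLI: location of the CLI source package.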
[tool.typer]
src-dir = "src/openllm"
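
# Build backend: hatchling, with hatch-vcs to derive the package version from git tags.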
[build-system]
requires = ["hatchling==1.26.3", "hatch-vcs==0.4.0"]
build-backend = 'hatchling.build'
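
# Version is read from the VCS (git tags), falling back to 0.0.0 when no tag is available.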
[tool.hatch.version]
source = "vcs"
fallback-version = "0.0.0"
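
# Write the resolved version to src/openllm/_version.py at build time.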
[tool.hatch.build.hooks.vcs]
version-file = "src/openllm/_version.py"
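
# Options passed through to setuptools-scm (which hatch-vcs builds on): a custom
# `git describe` invocation and a post-release version scheme.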
[tool.hatch.version.raw-options]
git_describe_command = [
  "git",
  "describe",
  "--dirty",
  "--tags",
  "--long",
  "--first-parent",
]
version_scheme = "post-release"
fallback_version = "0.0.0"
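
# Allow direct references (e.g. URLs) in dependency specifiers.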
[tool.hatch.metadata]
allow-direct-references = true
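
# Wheels ship only the openllm package, using the src/ layout.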
[tool.hatch.build.targets.wheel]
only-include = ["src/openllm"]
sources = ["src"]
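
# Keep VCS archival helper files out of the sdist.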
[tool.hatch.build.targets.sdist]
exclude = ["/.git_archival.txt", "/.python-version-default"]