Skip to content

Commit 171fabf

Browse files
rename opengpt to rungpt
1 parent 13e6209 commit 171fabf

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

92 files changed

+772
-530
lines changed

.github/workflows/docker-release.yml

+11-11
Original file line numberDiff line numberDiff line change
@@ -34,29 +34,29 @@ jobs:
3434
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
3535
echo "BUILD_TARGET=clip_executor" >> $GITHUB_ENV
3636
37-
VERSION=$(sed -n '/^__version__ = /p' ./open_gpt/__init__.py | cut -d \' -f2)
37+
VERSION=$(sed -n '/^__version__ = /p' ./rungpt/__init__.py | cut -d \' -f2)
3838
V_VERSION=v${VERSION}
3939
MINOR_VERSION=${VERSION%.*}
4040
MAJOR_VERSION=${MINOR_VERSION%.*}
4141
4242
if [[ "${{ github.event.inputs.triggered_by }}" == "CD" ]]; then
4343
# on every CD release
4444
echo "TAG_ALIAS=\
45-
jinaai/open_gpt:master" \
45+
jinaai/rungpt:master" \
4646
>> $GITHUB_ENV
4747
4848
elif [[ "${{ github.event.inputs.triggered_by }}" == "TAG" ]]; then
4949
# on every tag release
5050
echo "TAG_ALIAS=\
51-
jinaai/open_gpt:latest, \
52-
jinaai/open_gpt:v${VERSION}, \
53-
jinaai/open_gpt:v${MINOR_VERSION} \
51+
jinaai/rungpt:latest, \
52+
jinaai/rungpt:v${VERSION}, \
53+
jinaai/rungpt:v${MINOR_VERSION} \
5454
" >> $GITHUB_ENV
5555
5656
elif [[ "${{ github.event.inputs.triggered_by }}" == "MANUAL" ]]; then
5757
# on every manual release
5858
echo "TAG_ALIAS=\
59-
jinaai/open_gpt:v${VERSION} \
59+
jinaai/rungpt:v${VERSION} \
6060
" >> $GITHUB_ENV
6161
else
6262
echo "Bad triggered_by: ${{ github.event.inputs.triggered_by }}!"
@@ -86,7 +86,7 @@ jobs:
8686
with:
8787
file: Dockerfiles/Dockerfile
8888
platforms: linux/amd64
89-
cache-from: type=registry,ref=jinaai/open_gpt:latest
89+
cache-from: type=registry,ref=jinaai/rungpt:latest
9090
cache-to: type=inline
9191
push: true
9292
tags: ${{env.TAG_ALIAS}}
@@ -101,10 +101,10 @@ jobs:
101101
with:
102102
file: Dockerfiles/gateway.Dockerfile
103103
platforms: linux/amd64
104-
cache-from: type=registry,ref=jinaai/open_gpt_gateway:latest
104+
cache-from: type=registry,ref=jinaai/run_gpt_gateway:latest
105105
cache-to: type=inline
106106
push: true
107-
tags: jinaai/open_gpt_gateway:v${{env.VERSION}}, jinaai/open_gpt_gateway:latest
107+
tags: jinaai/run_gpt_gateway:v${{env.VERSION}}, jinaai/run_gpt_gateway:latest
108108
build-args: |
109109
BUILD_DATE=${{env.BUILD_DATE}}
110110
VERSION=${{env.VERSION}}
@@ -116,10 +116,10 @@ jobs:
116116
with:
117117
file: Dockerfiles/executor.Dockerfile
118118
platforms: linux/amd64
119-
cache-from: type=registry,ref=jinaai/open_gpt_executor:latest
119+
cache-from: type=registry,ref=jinaai/run_gpt_executor:latest
120120
cache-to: type=inline
121121
push: true
122-
tags: jinaai/open_gpt_executor:v${{env.VERSION}}, jinaai/open_gpt_executor:latest
122+
tags: jinaai/run_gpt_executor:v${{env.VERSION}}, jinaai/run_gpt_executor:latest
123123
build-args: |
124124
BUILD_DATE=${{env.BUILD_DATE}}
125125
VERSION=${{env.VERSION}}

.github/workflows/docs.yml

+5-5
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,12 @@ jobs:
99
steps:
1010
- uses: actions/checkout@v3
1111
with:
12-
path: opengpt
12+
path: rungpt
1313
- uses: actions/checkout@v3
1414
with:
15-
repository: numb3r3/opengpt.github.io.git
15+
repository: numb3r3/rungpt.github.io.git
1616
ref: 'main'
17-
path: ./opengpt.github.io
17+
path: ./rungpt.github.io
1818
token: ${{ secrets.GH_TEST_TOKEN }}
1919
- uses: actions/setup-python@v2
2020
with:
@@ -23,5 +23,5 @@ jobs:
2323
pip install pillow cairosvg
2424
sudo apt-get install -y libcairo2-dev libfreetype6-dev libffi-dev libjpeg-dev libpng-dev libz-dev
2525
pip install mkdocs-material mkdocs-material-extensions mkdocs-redirects --upgrade
26-
mkdocs gh-deploy --config-file ../opengpt/mkdocs.yml --force
27-
working-directory: ./opengpt.github.io
26+
mkdocs gh-deploy --config-file ../rungpt/mkdocs.yml --force
27+
working-directory: ./rungpt.github.io

.github/workflows/force-release.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ jobs:
4242
echo "VCS_REF=$VCS_REF" >> $GITHUB_ENV
4343
echo "Will build $VCS_REF"
4444
45-
VERSION=$(sed -n '/^__version__ = /p' ./open_gpt/__init__.py | cut -d \' -f2)
45+
VERSION=$(sed -n '/^__version__ = /p' ./run_gpt/__init__.py | cut -d \' -f2)
4646
4747
echo "VERSION=$VERSION" >> $GITHUB_ENV
4848

.github/workflows/release.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ jobs:
2828
echo "VCS_REF=$VCS_REF" >> $GITHUB_ENV
2929
echo "Will build $VCS_REF"
3030
31-
VERSION=$(sed -n '/^__version__ = /p' ./open_gpt/__init__.py | cut -d \' -f2)
31+
VERSION=$(sed -n '/^__version__ = /p' ./run_gpt/__init__.py | cut -d \' -f2)
3232
3333
echo "VERSION=$VERSION" >> $GITHUB_ENV
3434

.pre-commit-config.yaml

+5-5
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ repos:
33
rev: 4.0.1
44
hooks:
55
- id: flake8
6-
exclude: ^(.git|__pycache__|docs/source/conf.py|old|build|dist|tests|open_gpt/resources/)
6+
exclude: ^(.git|__pycache__|docs/source/conf.py|old|build|dist|tests|run_gpt/resources/)
77
args:
88
- --max-complexity=10
99
- --max-line-length=127
@@ -12,8 +12,8 @@ repos:
1212
# rev: v1.5.8
1313
# hooks:
1414
# - id: darglint
15-
# files: open_gpt/
16-
# exclude: ^(docs/|open_gpt/resources/)
15+
# files: run_gpt/
16+
# exclude: ^(docs/|run_gpt/resources/)
1717
# args:
1818
# - --message-template={path}:{line} {msg_id} {msg}
1919
# - -s=sphinx
@@ -24,15 +24,15 @@ repos:
2424
hooks:
2525
- id: pydocstyle
2626
files: client/
27-
exclude: ^(docs/|open_gpt/resources/)
27+
exclude: ^(docs/|run_gpt/resources/)
2828
args:
2929
- --select=D101,D102,D103
3030
- repo: https://github.com/ambv/black
3131
rev: 22.3.0
3232
hooks:
3333
- id: black
3434
types: [python]
35-
exclude: ^(docs/|open_gpt/resources/)
35+
exclude: ^(docs/|run_gpt/resources/)
3636
args:
3737
- -S
3838
- repo: https://github.com/asottile/blacken-docs

Dockerfiles/Dockerfile

+3-3
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,9 @@ FROM mosaicml/pytorch:${TORCH_VERSION}_cu${CUDA_VERSION}-python3.10-ubuntu20.04
88
ENV DEBIAN_FRONTEND=noninteractive LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
99

1010
# copy will almost always invalid the cache
11-
COPY . /open_gpt/
12-
WORKDIR /open_gpt
11+
COPY . /run_gpt/
12+
WORKDIR /run_gpt
1313

1414
RUN python3 -m pip install -e .
1515

16-
ENTRYPOINT ["opengpt"]
16+
ENTRYPOINT ["rungpt"]

MODEL_ZOO.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
# Model Zoo in OpenGPT
1+
# Model Zoo in RunGPT
22

3-
OpenGPT supports the following models out of the box:
3+
RunGPT supports the following models out of the box:
44

55
- LLM (Large Language Model)
66

README.md

+20-20
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
1-
# ☄️ OpenGPT
1+
# ☄️ RunGPT
22

33
<p align="center">
4-
<a href="https://github.com/jina-ai/opengpt"><img src="https://github.com/jina-ai/opengpt/blob/main/.github/images/logo.png" alt="OpenGPT: An open-source cloud-native large-scale multimodal model serving framework" width="300px"></a>
4+
<a href="https://github.com/jina-ai/rungpt"><img src="https://github.com/jina-ai/rungpt/blob/main/.github/images/logo.png" alt="rungpt: An open-source cloud-native large-scale multimodal model serving framework" width="300px"></a>
55
<br>
66
</p>
77

@@ -11,10 +11,10 @@
1111
1212

1313
![](https://img.shields.io/badge/Made%20with-JinaAI-blueviolet?style=flat)
14-
[![PyPI](https://img.shields.io/pypi/v/open_gpt_torch)](https://pypi.org/project/open_gpt_torch/)
15-
[![PyPI - License](https://img.shields.io/pypi/l/open_gpt_torch)](https://pypi.org/project/open_gpt_torch/)
14+
[![PyPI](https://img.shields.io/pypi/v/run_gpt_torch)](https://pypi.org/project/run_gpt_torch/)
15+
[![PyPI - License](https://img.shields.io/pypi/l/run_gpt_torch)](https://pypi.org/project/run_gpt_torch/)
1616

17-
**OpenGPT** is an open-source _cloud-native_ large-scale **_multimodal models_** (LMMs) serving framework.
17+
**RunGPT** is an open-source _cloud-native_ large-scale **_multimodal models_** (LMMs) serving framework.
1818
It is designed to simplify the deployment and management of large language models, on a distributed cluster of GPUs.
1919
We aim to make it a one-stop solution for a centralized and accessible place to gather techniques for optimizing large-scale multimodal models and make them easy to use for everyone.
2020

@@ -30,7 +30,7 @@ We aim to make it a one-stop solution for a centralized and accessible place to
3030

3131
## Features
3232

33-
OpenGPT provides the following features to make it easy to deploy and serve **large multi-modal models** (LMMs) at scale:
33+
RunGPT provides the following features to make it easy to deploy and serve **large multi-modal models** (LMMs) at scale:
3434

3535
- Support for multi-modal models on top of large language models
3636
- Scalable architecture for handling high traffic loads
@@ -41,13 +41,13 @@ OpenGPT provides the following features to make it easy to deploy and serve **la
4141

4242
## Updates
4343

44-
- **2023-05-12**: 🎉We have released the first version `v0.0.1` of OpenGPT. You can install it with `pip install open_gpt_torch`.
44+
- **2023-05-12**: 🎉We have released the first version `v0.0.1` of RunGPT. You can install it with `pip install run_gpt_torch`.
4545

4646
## Supported Models
4747

4848
<details>
4949

50-
OpenGPT supports the following models out of the box:
50+
RunGPT supports the following models out of the box:
5151

5252
- LLM (Large Language Model)
5353

@@ -69,7 +69,7 @@ For more details about the supported models, please see the [Model Zoo](./MODEL_
6969

7070
## Roadmap
7171

72-
You can view our roadmap with features that are planned, started, and completed on the [Roadmap discussion](https://github.com/jina-ai/opengpt/discussions/categories/roadmap) category.
72+
You can view our roadmap with features that are planned, started, and completed on the [Roadmap discussion](https://github.com/jina-ai/rungpt/discussions/categories/roadmap) category.
7373

7474
## Get Started
7575

@@ -78,15 +78,15 @@ You can view our roadmap with features that are planned, started, and completed
7878
Install the package with `pip`:
7979

8080
```bash
81-
pip install open_gpt_torch
81+
pip install run_gpt_torch
8282
```
8383

8484
### Quickstart
8585

8686
```python
87-
import open_gpt
87+
import run_gpt
8888

89-
model = open_gpt.create_model(
89+
model = run_gpt.create_model(
9090
'stabilityai/stablelm-tuned-alpha-3b', device='cuda', precision='fp16'
9191
)
9292

@@ -117,7 +117,7 @@ We use the [stabilityai/stablelm-tuned-alpha-3b](https://huggingface.co/stabilit
117117
In most cases of large model serving, the model cannot fit into a single GPU. To solve this problem, we also provide a `device_map` option (supported by `accelerate` package) to automatically partition the model and distribute it across multiple GPUs:
118118

119119
```python
120-
model = open_gpt.create_model(
120+
model = run_gpt.create_model(
121121
'stabilityai/stablelm-tuned-alpha-3b', precision='fp16', device_map='balanced'
122122
)
123123
```
@@ -128,24 +128,24 @@ In the above example, `device_map="balanced"` evenly split the model on all avai
128128
> The `device_map` option is supported by the [accelerate](https://github.com/huggingface/accelerate) package.
129129
130130

131-
See [examples on how to use opengpt with different models.](./examples) 🔥
131+
See [examples on how to use rungpt with different models.](./examples) 🔥
132132

133133

134134
## Build a model serving in one line
135135

136136
To do so, you can use the `serve` command:
137137

138138
```bash
139-
opengpt serve stabilityai/stablelm-tuned-alpha-3b --precision fp16 --device_map balanced
139+
rungpt serve stabilityai/stablelm-tuned-alpha-3b --precision fp16 --device_map balanced
140140
```
141141

142-
💡 **Tip**: you can inspect the available options with `opengpt serve --help`.
142+
💡 **Tip**: you can inspect the available options with `rungpt serve --help`.
143143

144144
This will start a gRPC and HTTP server listening on port `51000` and `52000` respectively.
145145
Once the server is ready, as shown below:
146146
<details>
147147
<summary>Click to expand</summary>
148-
<img src="https://github.com/jina-ai/opengpt/blob/main/.github/images/serve_ready.png" width="600px">
148+
<img src="https://github.com/jina-ai/rungpt/blob/main/.github/images/serve_ready.png" width="600px">
149149
</details>
150150

151151
You can then send requests to the server:
@@ -173,7 +173,7 @@ response = requests.post(
173173
What's more, we also provide a [Python client](https://github.com/jina-ai/inference-client/) (`inference-client`) for you to easily interact with the server:
174174

175175
```python
176-
from open_gpt import Client
176+
from run_gpt import Client
177177

178178
client = Client()
179179

@@ -206,7 +206,7 @@ To do so, you can use `deploy` command:
206206
using predefined executor
207207

208208
```bash
209-
opengpt deploy stabilityai/stablelm-tuned-alpha-3b --precision fp16 --device_map balanced --cloud jina --replicas 1
209+
rungpt deploy stabilityai/stablelm-tuned-alpha-3b --precision fp16 --device_map balanced --cloud jina --replicas 1
210210
```
211211

212212
It will give you a HTTP url and a gRPC url by default:
@@ -226,4 +226,4 @@ We welcome contributions from the community! To contribute, please submit a pull
226226

227227
## License
228228

229-
OpenGPT is licensed under the Apache License, Version 2.0. See LICENSE for the full license text.
229+
RunGPT is licensed under the Apache License, Version 2.0. See LICENSE for the full license text.

docs/docs/index.md

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,18 @@
11
# Quick start
22

3-
`opengpt` is an open-source _cloud-native_ large-scale **_multimodal models_** (LMMs) serving framework.
3+
`rungpt` is an open-source _cloud-native_ large-scale **_multimodal models_** (LMMs) serving framework.
44
It is designed to simplify the deployment and management of large language models, on a distributed cluster of GPUs.
55
We aim to make it a one-stop solution for a centralized and accessible place to gather techniques for optimizing large-scale multimodal models and make them easy to use for everyone.
66

77

88
## Installation and setup
99

10-
To use `opengpt`, install it with `pip`:
10+
To use `rungpt`, install it with `pip`:
1111

1212
<div class="termy">
1313

1414
```shell
15-
$ pip install open_gpt_torch
15+
$ pip install run_gpt_torch
1616
```
1717

1818
</div>
@@ -25,9 +25,9 @@ We use the [stabilityai/stablelm-tuned-alpha-3b](https://huggingface.co/stabilit
2525
<div class="termy">
2626

2727
```python
28-
import open_gpt
28+
import run_gpt
2929

30-
model = open_gpt.create_model(
30+
model = run_gpt.create_model(
3131
'stabilityai/stablelm-tuned-alpha-3b', device='cuda', precision='fp16'
3232
)
3333

docs/overrides/home.html

+4-4
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
<div class="tx-landing__hero">
2424
<div class="tx-landing__hero_text">
2525
<h1>Run LLM faster and easier. Use any cloud.</h1>
26-
<p>opengpt allows you to run your large-scale multi-modal models on any cloud, with a single command.</p>
26+
<p>rungpt allows you to run your large-scale multi-modal models on any cloud, with a single command.</p>
2727

28-
<p>opengpt is open-source, self-hosted, and supports all major cloud providers,
28+
<p>rungpt is open-source, self-hosted, and supports all major cloud providers,
2929
including AWS, GCP, and Azure.</p>
3030

3131
<a href="/docs" class="md-button md-button--primary">
@@ -158,13 +158,13 @@ <h2>Open-source and self-hosted
158158
d="M12 1.5A2.5 2.5 0 0 1 14.5 4 2.5 2.5 0 0 1 12 6.5 2.5 2.5 0 0 1 9.5 4 2.5 2.5 0 0 1 12 1.5M15.87 5C18 5 20 7 20 9c2.7 0 2.7 4 0 4H4c-2.7 0-2.7-4 0-4 0-2 2-4 4.13-4 .44 1.73 2.01 3 3.87 3 1.86 0 3.43-1.27 3.87-3M5 15h3l1 7H7l-2-7m5 0h4l-1 7h-2l-1-7m6 0h3l-2 7h-2l1-7Z"></path></svg></span>
159159
</h2>
160160
<p class="tx-landing__bottom_cta_text">
161-
Getting started with opengpt's <a href="https://github.com/jina-ai/opengpt">open source</a> tool is
161+
Getting started with rungpt's <a href="https://github.com/jina-ai/rungpt">open source</a> tool is
162162
just a matter of:
163163
</p>
164164
<div class="tx-landing__bottom_cta_terminal">
165165
<div class="tx-landing__bottom_cta_terminal_row">
166166
<span class="tx-landing__bottom_cta_terminal_row_prefix">$</span>
167-
pip install "open_gpt_torch"
167+
pip install "run_gpt_torch"
168168
</div>
169169
</div>
170170
<p class="tx-landing__bottom_cta_text">

docs/overrides/main.html

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
{% extends "base.html" %}
22

33
{% block announce %}
4-
Like opengpt? Give us a <img alt="" style="display: inline-block; height: 1.125em; margin-bottom: -2px"
4+
Like rungpt? Give us a <img alt="" style="display: inline-block; height: 1.125em; margin-bottom: -2px"
55
src="https://cdnjs.cloudflare.com/ajax/libs/twemoji/14.0.2/svg/2b50.svg">
66
on <img style="display: inline-block; height: 1.15em; margin-bottom: -4px; margin-right: -2px"
7-
src="{{ 'assets/images/github-logo.png' | url }}"/> <a href="https://github.com/jina-ai/opengpt" target="_blank">
7+
src="{{ 'assets/images/github-logo.png' | url }}"/> <a href="https://github.com/jina-ai/rungpt" target="_blank">
88
GitHub</a><span style="color: var(--md-code-hl-punctuation-color)">!</span>
99
{% endblock %}

0 commit comments

Comments
 (0)