Skip to content

Commit 62b73b9

Browse files
authored
Add ChatCompletions and Audio endpoints (#237)
1 parent 62ebb44 commit 62b73b9

14 files changed

+507
-35
lines changed

README.md

+38-14
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ pip install openai[datalib]
4545

4646
## Usage
4747

48-
The library needs to be configured with your account's secret key which is available on the [website](https://beta.openai.com/account/api-keys). Either set it as the `OPENAI_API_KEY` environment variable before using the library:
48+
The library needs to be configured with your account's secret key which is available on the [website](https://platform.openai.com/account/api-keys). Either set it as the `OPENAI_API_KEY` environment variable before using the library:
4949
5050
```bash
5151
export OPENAI_API_KEY='sk-...'
@@ -57,14 +57,14 @@ Or set `openai.api_key` to its value:
5757
import openai
5858
openai.api_key = "sk-..."
5959
60-
# list engines
61-
engines = openai.Engine.list()
60+
# list models
61+
models = openai.Model.list()
6262
63-
# print the first engine's id
64-
print(engines.data[0].id)
63+
# print the first model's id
64+
print(models.data[0].id)
6565

6666
# create a completion
67-
completion = openai.Completion.create(engine="ada", prompt="Hello world")
67+
completion = openai.Completion.create(model="ada", prompt="Hello world")
6868

6969
# print the completion
7070
print(completion.choices[0].text)
@@ -127,11 +127,14 @@ which makes it easy to interact with the API from your terminal. Run
127127
`openai api -h` for usage.
128128

129129
```sh
130-
# list engines
131-
openai api engines.list
130+
# list models
131+
openai api models.list
132132

133133
# create a completion
134-
openai api completions.create -e ada -p "Hello world"
134+
openai api completions.create -m ada -p "Hello world"
135+
136+
# create a chat completion
137+
openai api chat_completions.create -m gpt-3.5-turbo -g user "Hello world"
135138

136139
# generate images via DALL·E API
137140
openai api image.create -p "two dogs playing chess, cartoon" -n 1
@@ -152,6 +155,18 @@ Examples of how to use this Python library to accomplish various tasks can be fo
152155

153156
Prior to July 2022, this OpenAI Python library hosted code examples in its examples folder, but since then all examples have been migrated to the [OpenAI Cookbook](https://github.com/openai/openai-cookbook/).
154157

158+
### Chat
159+
160+
Conversational models such as `gpt-3.5-turbo` can be called using the chat completions endpoint.
161+
162+
```python
163+
import openai
164+
openai.api_key = "sk-..." # supply your API key however you choose
165+
166+
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world!"}])
167+
print(completion.choices[0].message.content)
168+
```
169+
155170
### Embeddings
156171

157172
In the OpenAI Python library, an embedding represents a text string as a fixed-length vector of floating point numbers. Embeddings are designed to measure the similarity or relevance between text strings.
@@ -169,7 +184,7 @@ text_string = "sample text"
169184
model_id = "text-similarity-davinci-001"
170185

171186
# compute the embedding of the text
172-
embedding = openai.Embedding.create(input=text_string, engine=model_id)['data'][0]['embedding']
187+
embedding = openai.Embedding.create(input=text_string, model=model_id)['data'][0]['embedding']
173188
```
174189

175190
An example of how to call the embeddings method is shown in this [get embeddings notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Get_embeddings.ipynb).
@@ -208,7 +223,7 @@ For more information on fine-tuning, read the [fine-tuning guide](https://beta.o
208223

209224
### Moderation
210225

211-
OpenAI provides a Moderation endpoint that can be used to check whether content complies with the OpenAI [content policy](https://beta.openai.com/docs/usage-policies)
226+
OpenAI provides a Moderation endpoint that can be used to check whether content complies with the OpenAI [content policy](https://platform.openai.com/docs/usage-policies)
212227

213228
```python
214229
import openai
@@ -217,7 +232,7 @@ openai.api_key = "sk-..." # supply your API key however you choose
217232
moderation_resp = openai.Moderation.create(input="Here is some perfectly innocuous text that follows all OpenAI content policies.")
218233
```
219234

220-
See the [moderation guide](https://beta.openai.com/docs/guides/moderation) for more details.
235+
See the [moderation guide](https://platform.openai.com/docs/guides/moderation) for more details.
221236

222237
## Image generation (DALL·E)
223238

@@ -229,6 +244,15 @@ image_resp = openai.Image.create(prompt="two dogs playing chess, oil painting",
229244

230245
```
231246

247+
## Audio transcription (Whisper)
248+
```python
import openai
openai.api_key = "sk-..."  # supply your API key however you choose
with open("path/to/file.mp3", "rb") as f:
    transcript = openai.Audio.transcribe("whisper-1", f)
```
255+
232256
## Async API
233257

234258
Async support is available in the API by prepending `a` to a network-bound method:
@@ -238,7 +262,7 @@ import openai
238262
openai.api_key = "sk-..." # supply your API key however you choose
239263

240264
async def create_completion():
241-
completion_resp = await openai.Completion.acreate(prompt="This is a test", engine="davinci")
265+
completion_resp = await openai.Completion.acreate(prompt="This is a test", model="davinci")
242266

243267
```
244268

@@ -255,7 +279,7 @@ openai.aiosession.set(ClientSession())
255279
await openai.aiosession.get().close()
256280
```
257281

258-
See the [usage guide](https://beta.openai.com/docs/guides/images) for more details.
282+
See the [usage guide](https://platform.openai.com/docs/guides/images) for more details.
259283

260284
## Requirements
261285

openai/__init__.py

+5-1
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
from typing import Optional, TYPE_CHECKING
88

99
from openai.api_resources import (
10+
Audio,
11+
ChatCompletion,
1012
Completion,
1113
Customer,
1214
Edit,
@@ -52,6 +54,8 @@
5254

5355
__all__ = [
5456
"APIError",
57+
"Audio",
58+
"ChatCompletion",
5559
"Completion",
5660
"Customer",
5761
"Edit",
@@ -74,7 +78,7 @@
7478
"app_info",
7579
"ca_bundle_path",
7680
"debug",
77-
"enable_elemetry",
81+
"enable_telemetry",
7882
"log",
7983
"organization",
8084
"proxy",

openai/api_requestor.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -476,8 +476,8 @@ def _prepare_request_raw(
476476
abs_url = _build_api_url(abs_url, encoded_params)
477477
elif method in {"post", "put"}:
478478
if params and files:
479-
raise ValueError("At most one of params and files may be specified.")
480-
if params:
479+
data = params
480+
if params and not files:
481481
data = json.dumps(params).encode()
482482
headers["Content-Type"] = "application/json"
483483
else:

openai/api_resources/__init__.py

+2
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
from openai.api_resources.audio import Audio # noqa: F401
2+
from openai.api_resources.chat_completion import ChatCompletion # noqa: F401
13
from openai.api_resources.completion import Completion # noqa: F401
24
from openai.api_resources.customer import Customer # noqa: F401
35
from openai.api_resources.deployment import Deployment # noqa: F401

openai/api_resources/audio.py

+205
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,205 @@
1+
from typing import Any, List

import openai
from openai import api_requestor, util
from openai.api_resources.abstract import APIResource


class Audio(APIResource):
    """API resource for the ``/audio`` endpoints (Whisper transcription and
    translation).

    The ``*_raw`` variants accept an explicit ``filename`` for file-like
    objects that have no ``.name`` attribute (e.g. ``io.BytesIO``); the
    plain variants read it from ``file.name``. ``a``-prefixed methods are
    the async counterparts.
    """

    OBJECT_NAME = "audio"

    @classmethod
    def _get_url(cls, action):
        # Builds e.g. "/audio/transcriptions" or "/audio/translations".
        return cls.class_url() + f"/{action}"

    @classmethod
    def _prepare_request(
        cls,
        file,
        filename,
        model,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Build the (requestor, multipart files, form data) triple for a call.

        ``file`` is sent as a multipart part named "file"; any extra keyword
        arguments (prompt, temperature, language, ...) are passed through as
        form fields alongside ``model``.
        """
        requestor = api_requestor.APIRequestor(
            api_key,
            api_base=api_base or openai.api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
        )
        files: List[Any] = [
            ("file", (filename, file, "application/octet-stream")),
        ]
        data = {
            "model": model,
            **params,
        }
        return requestor, files, data

    @classmethod
    def transcribe(
        cls,
        model,
        file,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Transcribe audio in its source language; ``file`` must have a ``.name``."""
        # Forward per-call credentials/config to the requestor; previously these
        # keyword arguments were accepted but silently dropped.
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=file.name,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("transcriptions")
        response, _, api_key = requestor.request("post", url, files=files, params=data)
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    def translate(
        cls,
        model,
        file,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Translate audio into English; ``file`` must have a ``.name``."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=file.name,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("translations")
        response, _, api_key = requestor.request("post", url, files=files, params=data)
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    def transcribe_raw(
        cls,
        model,
        file,
        filename,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Transcribe audio from a raw file-like object with an explicit filename."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=filename,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("transcriptions")
        response, _, api_key = requestor.request("post", url, files=files, params=data)
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    def translate_raw(
        cls,
        model,
        file,
        filename,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Translate audio into English from a raw file-like object with an explicit filename."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=filename,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("translations")
        response, _, api_key = requestor.request("post", url, files=files, params=data)
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    async def atranscribe(
        cls,
        model,
        file,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Async variant of :meth:`transcribe`."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=file.name,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("transcriptions")
        response, _, api_key = await requestor.arequest(
            "post", url, files=files, params=data
        )
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    async def atranslate(
        cls,
        model,
        file,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Async variant of :meth:`translate`."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=file.name,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("translations")
        response, _, api_key = await requestor.arequest(
            "post", url, files=files, params=data
        )
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    async def atranscribe_raw(
        cls,
        model,
        file,
        filename,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Async variant of :meth:`transcribe_raw`."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=filename,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("transcriptions")
        response, _, api_key = await requestor.arequest(
            "post", url, files=files, params=data
        )
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

    @classmethod
    async def atranslate_raw(
        cls,
        model,
        file,
        filename,
        api_key=None,
        api_base=None,
        api_type=None,
        api_version=None,
        organization=None,
        **params,
    ):
        """Async variant of :meth:`translate_raw`."""
        requestor, files, data = cls._prepare_request(
            file=file,
            filename=filename,
            model=model,
            api_key=api_key,
            api_base=api_base,
            api_type=api_type,
            api_version=api_version,
            organization=organization,
            **params,
        )
        url = cls._get_url("translations")
        response, _, api_key = await requestor.arequest(
            "post", url, files=files, params=data
        )
        return util.convert_to_openai_object(
            response, api_key, api_version, organization
        )

0 commit comments

Comments
 (0)