|
2 | 2 | import logging
|
3 | 3 | import os
|
4 | 4 | import openai
|
5 |
| -from langchain.prompts import PromptTemplate |
6 |
| -from langchain.llms import OpenAI |
7 |
| -from langchain.llms.openai import AzureOpenAI |
| 5 | +from langchain_core.prompts import PromptTemplate |
| 6 | +from langchain_openai import AzureChatOpenAI |
| 7 | +from azure.identity import DefaultAzureCredential |
# Azure Functions v2 (Python programming model) application object; the
# @app.function_name / @app.route decorators below register handlers on it.
# NOTE(review): `func` (azure.functions) must be imported above this view — confirm.
app = func.FunctionApp()
|
11 |
| -@app.function_name(name='ask') |
12 |
| -@app.route(route='ask', auth_level='anonymous', methods=['POST']) |
13 |
| -def main(req): |
14 | 11 |
|
15 |
| - prompt = req.params.get('prompt') |
16 |
| - if not prompt: |
17 |
| - try: |
18 |
| - req_body = req.get_json() |
19 |
| - except ValueError: |
20 |
| - raise RuntimeError("prompt data must be set in POST.") |
21 |
| - else: |
22 |
| - prompt = req_body.get('prompt') |
23 |
| - if not prompt: |
24 |
| - raise RuntimeError("prompt data must be set in POST.") |
# Initializes Azure OpenAI environment
def init():
    """Initialise module-level Azure OpenAI configuration.

    Authenticates with Entra ID via ``DefaultAzureCredential``, exports the
    resulting bearer token through the ``OPENAI_API_*`` environment variables,
    and mirrors the settings onto the ``openai`` module globals used by the
    legacy SDK surface.

    Side effects: sets the module globals listed below and mutates
    ``os.environ`` and ``openai`` module attributes.
    """
    global credential
    global AZURE_OPENAI_ENDPOINT
    global AZURE_OPENAI_KEY
    global AZURE_OPENAI_CHATGPT_DEPLOYMENT
    global OPENAI_API_VERSION

    # Use the Entra Id DefaultAzureCredential to get the token.
    credential = DefaultAzureCredential()
    # Fetch the token ONCE and reuse it; the original called get_token()
    # twice for the same scope (two redundant network round-trips).
    # NOTE(review): this token expires (typically ~1 hour); since init() runs
    # only at module import, a long-lived worker will keep a stale token —
    # confirm whether per-request refresh is needed.
    token = credential.get_token(
        "https://cognitiveservices.azure.com/.default"
    ).token

    # Set the API type to `azure_ad` and expose the token as the API key.
    os.environ["OPENAI_API_TYPE"] = "azure_ad"
    os.environ["OPENAI_API_KEY"] = token

    # Initialize Azure OpenAI environment.
    AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
    AZURE_OPENAI_KEY = token
    AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ.get(
        "AZURE_OPENAI_CHATGPT_DEPLOYMENT") or "chat"
    OPENAI_API_VERSION = os.environ.get(
        "OPENAI_API_VERSION") or "2023-05-15"

    # Configure base OpenAI framework for LangChain and/or llm.
    openai.api_key = AZURE_OPENAI_KEY
    openai.api_base = AZURE_OPENAI_ENDPOINT
    openai.api_type = "azure"
    openai.api_version = OPENAI_API_VERSION
42 | 44 |
|
43 |
| - if bool(USE_LANGCHAIN): |
44 |
| - logging.info('Using Langchain') |
45 | 45 |
|
46 |
| - llm = AzureOpenAI(deployment_name=AZURE_OPENAI_CHATGPT_DEPLOYMENT, temperature=0.3, openai_api_key=AZURE_OPENAI_KEY) |
47 |
| - llm_prompt = PromptTemplate( |
48 |
| - input_variables=["human_prompt"], |
49 |
| - template="The following is a conversation with an AI assistant. The assistant is helpful.\n\nAI: I am an AI created by OpenAI. How can I help you today?\nHuman: {human_prompt}?", |
50 |
| - ) |
51 |
| - from langchain.chains import LLMChain |
52 |
| - chain = LLMChain(llm=llm, prompt=llm_prompt) |
53 |
| - return chain.run(prompt) |
54 |
| - |
55 |
| - else: |
56 |
| - logging.info('Using ChatGPT LLM directly') |
# Initialize the Azure OpenAI environment once, at module import (cold start).
# NOTE(review): the Entra ID token fetched inside init() expires (~1h); a
# long-lived worker keeps using it until the next cold start — confirm.
init()
57 | 48 |
|
58 |
| - completion = openai.Completion.create( |
59 |
| - engine=AZURE_OPENAI_CHATGPT_DEPLOYMENT, |
60 |
| - prompt=generate_prompt(prompt), |
61 |
| - temperature=0.3, |
62 |
| - max_tokens=200 |
63 |
| - ) |
64 |
| - return completion.choices[0].text |
65 | 49 |
|
# Function App entry point route for /api/ask
@app.function_name(name="ask")
@app.route(route="ask", auth_level="function", methods=["POST"])
def main(req):
    """Handle POST /api/ask: read a ``prompt`` from the JSON body, run it
    through the Azure OpenAI chat deployment via LangChain, and return the
    model's reply as the HTTP response body.

    Raises:
        RuntimeError: if the body is not JSON, is not a JSON object, or has
            no non-empty ``prompt`` field.
    """
    # Keep the try body minimal: only get_json() raises ValueError here.
    try:
        req_body = req.get_json()
    except ValueError:
        raise RuntimeError("prompt data must be set in POST.")

    # Guard against a valid-JSON-but-not-an-object body (e.g. "hi" or [1]):
    # the original called req_body.get(...) unconditionally, which raised an
    # uncaught AttributeError instead of the intended RuntimeError.
    prompt = req_body.get("prompt") if isinstance(req_body, dict) else None
    if not prompt:
        raise RuntimeError("prompt data must be set in POST.")

    # LangChain user code goes here
    llm = AzureChatOpenAI(
        deployment_name=AZURE_OPENAI_CHATGPT_DEPLOYMENT,
        temperature=0.3
    )
    llm_prompt = PromptTemplate.from_template(
        "The following is a conversation with an AI assistant. " +
        "The assistant is helpful.\n\n" +
        "A:How can I help you today?\n" +
        "Human: {human_prompt}?"
    )
    formatted_prompt = llm_prompt.format(human_prompt=prompt)

    response = llm.invoke(formatted_prompt)
    logging.info(response.content)

    return func.HttpResponse(response.content)
0 commit comments