diff --git a/MANIFEST.in b/MANIFEST.in
index 2bf8bfe27..ddf3a5524 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,6 +7,7 @@ include dff/context_storages/protocols.json
 exclude makefile
 recursive-exclude tests *
+recursive-exclude examples *
 recursive-exclude tutorials *
 recursive-exclude * __pycache__
 recursive-exclude * *.py[co]
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index 6c5854dbc..0a688c067 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -1,4 +1,21 @@
 Examples
 --------
-Examples are available in this `repository `_.
+:doc:`FAQ bot <./examples/faq_bot>`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+FAQ bot for Deeppavlov users built using `DFF`.
+Can be run with Telegram or with a web interface.
+
+:doc:`Customer service bot <./examples/customer_service_bot>`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Customer service bot built using `DFF`.
+This bot is designed to answer user questions of any type within a limited business domain (a book shop).
+Uses a Telegram interface.
+
+.. toctree::
+   :hidden:
+
+   examples/faq_bot
+   examples/customer_service_bot
diff --git a/docs/source/examples/customer_service_bot.rst b/docs/source/examples/customer_service_bot.rst
new file mode 100644
index 000000000..76ea8ecc1
--- /dev/null
+++ b/docs/source/examples/customer_service_bot.rst
@@ -0,0 +1,67 @@
+Customer service bot
+--------------------
+
+Customer service bot built using `DFF`.
+This bot is designed to answer user questions of any type within a limited business domain (a book shop).
+Uses a Telegram interface.
+
+You can read more about deploying the project in its README file.
+
+Project structure
+~~~~~~~~~~~~~~~~~
+
+While DFF allows you to choose any structure for your own projects,
+we propose a schema of how project files can be meaningfully split
+into services and modules.
+
+* In our projects, we go for docker-based deployment due to its scalability and universal
+  applicability. If you decide to go for the same deployment scheme, you will always
+  have at least one service that wraps your bot.
+
+* Neural network models that you run locally can be factored out into a separate service.
+  This way your main service, i.e. the service wrapping the bot, won't crash if something
+  unexpected happens with the model.
+
+* In the main service directory, we make a separate package for all DFF-related abstractions.
+  There, we put the script into a separate module, also creating modules for
+  `processing, condition, and response functions <../user_guides/basic_conceptions#>`__.
+
+* The rest of the project-related Python code is factored out into other packages.
+
+* We also create 'run.py' and 'test.py' at the project root. These files import the ready pipeline
+  and execute it to test or run the service.
+
+.. code-block:: shell
+
+   examples/customer_service_bot/
+   ├── docker-compose.yml # docker-compose orchestrates the services
+   ├── bot # main docker service
+   │   ├── api
+   │   │   ├── __init__.py
+   │   │   ├── chatgpt.py
+   │   │   └── intent_catcher.py
+   │   ├── dialog_graph # Separate package for DFF-related abstractions
+   │   │   ├── __init__.py
+   │   │   ├── conditions.py # Condition callbacks
+   │   │   ├── consts.py # Constant values for keys
+   │   │   ├── processing.py # Processing callbacks
+   │   │   ├── response.py # Response callbacks
+   │   │   └── script.py # DFF script and pipeline are constructed here
+   │   ├── dockerfile # The dockerfile takes care of setting up the project. See the file for more details
+   │   ├── requirements.txt
+   │   ├── run.py
+   │   └── test.py
+   └── intent_catcher # intent catching model wrapped as a docker service
+       ├── dockerfile
+       ├── requirements.txt
+       ├── server.py
+       └── test_server.py
+
+Models
+~~~~~~
+
+Two differently designed models are used to power the customer service bot: an intent classifier and a generative model.
+The classifier is deployed as a separate service, while ChatGPT is accessed through the OpenAI API.
+
+* `DeepPavlov Intent Catcher <https://docs.deeppavlov.ai/en/0.14.1/features/models/intent_catcher.html>`__ is used for intent retrieval.
+* `ChatGPT <https://openai.com/pricing#language-models>`__ is used for context-based question answering.
\ No newline at end of file
diff --git a/docs/source/examples/faq_bot.rst b/docs/source/examples/faq_bot.rst
new file mode 100644
index 000000000..8631cc89d
--- /dev/null
+++ b/docs/source/examples/faq_bot.rst
@@ -0,0 +1,61 @@
+FAQ Bot
+-------
+
+FAQ bot for Deeppavlov users built using `DFF`.
+Can be run with Telegram or with a web interface.
+
+You can read more about deploying the project in its README file.
+
+Project structure
+~~~~~~~~~~~~~~~~~
+
+* In our projects, we go for docker-based deployment due to its scalability and universal
+  applicability. If you decide to go for the same deployment scheme, you will always
+  have at least one service that wraps your bot.
+
+* In the main service directory, we make a separate package for all DFF-related abstractions.
+  There, we put the `script <#>`__ into a separate module, also creating modules for
+  `condition and response functions <#>`__.
+
+* We also create a separate package for `pipeline services <#>`__.
+
+* The rest of the project-related Python code is factored out into other packages.
+
+
+.. code-block:: shell
+
+   examples/frequently_asked_question_bot/
+   ├── README.md
+   ├── compose.yml # docker compose file orchestrates the services
+   ├── nginx.conf # web service proxy configurations
+   └── web
+       ├── Dockerfile
+       ├── app.py
+       ├── bot
+       │   ├── dialog_graph # A separate module for DFF-related abstractions
+       │   │   ├── responses.py
+       │   │   └── script.py # DFF script is constructed here
+       │   ├── faq_model # model-related code
+       │   │   ├── faq_dataset_sample.json
+       │   │   ├── model.py
+       │   │   ├── request_translations.json
+       │   │   └── response_translations.json
+       │   ├── pipeline.py
+       │   ├── pipeline_services # Separately stored pipeline service functions
+       │   │   └── pre_services.py
+       │   └── test.py
+       ├── requirements.txt
+       └── static
+           ├── LICENSE.txt
+           ├── index.css
+           ├── index.html
+           └── index.js
+
+Models
+~~~~~~
+
+The project makes use of the `clips/mfaq <https://huggingface.co/clips/mfaq>`__ model that powers the bot's ability to understand queries in multiple languages.
+A number of techniques are employed to make its usage more efficient.
+
+* The project's Dockerfile illustrates caching a SentenceTransformer model in a Docker container.
+  The model is constructed during the image build, so that the weights that the Huggingface library fetches from the web are downloaded in advance. At runtime, the fetched weights are quickly read from disk.
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 000000000..604c59de0
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,5 @@
+# DFF example projects
+
+This directory contains examples of bots built using [DFF](https://github.com/deeppavlov/dialog_flow_framework) (Dialog Flow Framework).
+
+The example projects include an FAQ bot for Deeppavlov users and a customer service bot for a book shop. The customer service bot uses Telegram as an interface, while the FAQ bot can be run either with Telegram or with a web interface.
diff --git a/examples/customer_service_bot/.env b/examples/customer_service_bot/.env
new file mode 100644
index 000000000..9830af9e2
--- /dev/null
+++ b/examples/customer_service_bot/.env
@@ -0,0 +1,2 @@
+TG_BOT_TOKEN=bot_token
+OPENAI_API_TOKEN=openai_api_token
\ No newline at end of file
diff --git a/examples/customer_service_bot/README.md b/examples/customer_service_bot/README.md
new file mode 100644
index 000000000..ab2310f30
--- /dev/null
+++ b/examples/customer_service_bot/README.md
@@ -0,0 +1,67 @@
+## Description
+
+### Customer service bot
+
+Customer service bot built using `DFF`. Uses Telegram as an interface.
+This bot is designed to answer user questions of any type within a limited business domain (a book shop).
+
+* [DeepPavlov Intent Catcher](https://docs.deeppavlov.ai/en/0.14.1/features/models/intent_catcher.html) is used for intent retrieval.
+* [ChatGPT](https://openai.com/pricing#language-models) is used for context-based question answering.
+
+### Intent Catcher
+
+Intent Catcher is a DistilBERT-based classifier for user intent classes.
+We use the DeepPavlov library for a seamless training and inference experience.
+Sample code for training the model can be found in `Training_intent_catcher.ipynb`.
+The model is deployed as a separate microservice running at port 4999.
+
+The bot interacts with the container via the `/respond` endpoint.
+The API expects a JSON object with the dialog history passed as an array and labeled 'dialog_contexts'. Intents will be extracted from the last utterance.
+
+```json
+{
+    "dialog_contexts": ["phrase_1", "phrase_2"]
+}
+```
+
+The API responds with a nested array containing `label - score` pairs.
+
+```json
+[["no",0.3393537402153015]]
+```
+
+Run the intent catcher:
+```commandline
+docker compose up --build --abort-on-container-exit --exit-code-from intent_catcher
+```
+
+## Running the bot
+
+### Step 1: Configuring the docker services
+To interact with external APIs, the bot requires API tokens that can be set through the [.env](.env) file. Update it, replacing the placeholders with actual token values.
+```
+TG_BOT_TOKEN=***
+OPENAI_API_TOKEN=***
+```
+
+### Step 2: Launching the project
+*The commands below need to be run from the /examples/customer_service_bot directory*
+
+Building the bot and launching it in the background can be done with a single command, given that the environment variables have been configured correctly. You can then immediately interact with your bot in Telegram.
+```commandline
+docker compose up -d
+```
+
+If any of the source files have received updates, you can rebuild and sync the bot using the `docker compose build` command.
+```commandline
+docker compose build
+```
+In case of bugs, you can test whether the bot correctly handles basic functionality using the following command:
+```commandline
+docker compose run assistant pytest test.py
+```
+
+The bot can also be run as a self-standing service, i.e. without the intent catcher, for a less resource-demanding workflow:
+```commandline
+docker compose run assistant python run.py
+```
diff --git a/examples/customer_service_bot/Training_intent_catcher.ipynb b/examples/customer_service_bot/Training_intent_catcher.ipynb
new file mode 100644
index 000000000..2b8ded7a8
--- /dev/null
+++ b/examples/customer_service_bot/Training_intent_catcher.ipynb
@@ -0,0 +1,184 @@
+{
+    "nbformat": 4,
+    "nbformat_minor": 0,
+    "metadata": {
+        "colab": {
+            "provenance": [],
+            "mount_file_id": "1K8dSq-mrFOR44N6CwDp8WiqVDHdtDHJQ",
+            "authorship_tag": "ABX9TyP5keJL46m+Vgb5Qj+tw1SA",
+            "include_colab_link": true
+        },
+        "kernelspec": {
+            "name": "python3",
+            "display_name": "Python 3"
+        },
+        "language_info": {
+            "name": "python"
+        },
+        "accelerator": "GPU",
+        "gpuClass": "standard"
+    },
+    "cells": [
+        {
+            "cell_type": "markdown",
+            "metadata": {
+                "id": "view-in-github",
+                "colab_type": "text"
+            },
+            "source": [
+                "\"Open"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!pip install --upgrade pip"
+            ],
+            "metadata": {
+                "colab": {
+                    "base_uri": "https://localhost:8080/"
+                },
+                "id": "rY6UASGLpmt6",
+                "outputId": "f44435da-8658-43f2-f56d-e8987663663c"
+            },
+            "execution_count": null,
+            "outputs": [
+                {
+                    "output_type": "stream",
+                    "name": "stdout",
+                    "text": [
+                        "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
+                        "Requirement already satisfied: pip in /usr/local/lib/python3.10/dist-packages (23.1.2)\n"
+                    ]
+                }
+            ]
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!pip install deeppavlov"
+            ],
+            "metadata": {
+                "id": "RboxW9XRp57X"
+            },
+            "execution_count": null,
+            "outputs": []
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y\n",
+                "# The required version of the 'tokenizers' library depends on a Rust compiler."
+            ],
+            "metadata": {
+                "id": "BfpE0tExLbN2"
+            },
+            "execution_count": null,
+            "outputs": []
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!export PATH=\"$HOME/.cargo/bin:${PATH}\" && pip install 'tokenizers==0.10.3'\n",
+                "# Before installing 'tokenizers', we ensure system-wide Rust compiler availability."
+            ],
+            "metadata": {
+                "id": "aDJWGvk0tU1-"
+            },
+            "execution_count": null,
+            "outputs": []
+        },
+        {
+            "cell_type": "code",
+            "execution_count": null,
+            "metadata": {
+                "id": "Fl7obdeKFomg"
+            },
+            "outputs": [],
+            "source": [
+                "!git clone https://github.com/deeppavlov/dream.git"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!pip install 'xeger==0.3.5'\n",
+                "!pip install 'transformers==4.6.0'"
+            ],
+            "metadata": {
+                "id": "Gl9xIpKFqiLs"
+            },
+            "execution_count": null,
+            "outputs": []
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "# In order to train the model with custom classes, we need to modify the 'intent_phrases.json' file.\n",
+                "# Each intent in the json structure includes a 'phrases' section.\n",
+                "# Regular expressions from that section will be used to generate the data used during training.\n",
+                "import json\n",
+                "INTENT_PHRASES = './dream/annotators/IntentCatcherTransformers/intent_phrases.json'\n",
+                "\n",
+                "with open(INTENT_PHRASES, 'r') as file:\n",
+                "    intents = json.load(file)\n",
+                "\n",
+                "intents['purchase'] = {\n",
+                "    \"phrases\": [\n",
+                "        \"i think i'll ((order)|(purchase)|(buy)) a book\",\n",
+                "        \"i plan on ((buying)|(purchasing)|(ordering)) a book\",\n",
+                "        \"i would ((love)|(like)) to ((order)|(purchase)|(buy)) a book\",\n",
+                "        \"i'm interested in ((buying)|(purchasing)|(ordering)) a book\",\n",
+                "        \"do you have this book in stock\",\n",
+                "        \"i'm looking to ((order)|(purchase)|(buy)) a book\",\n",
+                "        \"add this to my cart\",\n",
+                "        \"i want to make an order\"\n",
+                "    ],\n",
+                "    \"reg_phrases\": [\n",
+                "        \"i want to buy a book\",\n",
+                "        \"order an item\",\n",
+                "        \"order a book\"\n",
+                "    ],\n",
+                "    \"min_precision\": 0.94,\n",
+                "    \"punctuation\": [\n",
+                "        \".\",\n",
+                "        \"?\"\n",
+                "    ]\n",
+                "}\n",
+                "\n",
+                "with open(INTENT_PHRASES, 'w') as file:\n",
+                "    json.dump(intents, file)"
+            ],
+            "metadata": {
+                "id": "d26Ko8xFF6sH"
+            },
+            "execution_count": null,
+            "outputs": []
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!cd /content/dream/annotators/IntentCatcherTransformers/ && export CUDA_VISIBLE_DEVICES=0 && python -m deeppavlov train intents_model_dp_config.json\n",
+                "# The CUDA_VISIBLE_DEVICES variable is required for GPU-powered training with DeepPavlov."
+            ],
+            "metadata": {
+                "id": "lOmGOt6Wllly"
+            },
+            "execution_count": null,
+            "outputs": []
+        },
+        {
+            "cell_type": "code",
+            "source": [
+                "!cp /root/.deeppavlov/models/classifiers/intents_model_v2/model.pth.tar /content/drive/MyDrive/\n",
+                "!cp /root/.deeppavlov/models/classifiers/intents_model_v2/classes.dict /content/drive/MyDrive/\n",
+                "# Weights and metadata produced during training can be copied to the mounted Google Drive."
+            ],
+            "metadata": {
+                "id": "YUeJ67-CeuX5"
+            },
+            "execution_count": null,
+            "outputs": []
+        }
+    ]
+}
diff --git a/examples/customer_service_bot/bot/api/__init__.py b/examples/customer_service_bot/bot/api/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/customer_service_bot/bot/api/chatgpt.py b/examples/customer_service_bot/bot/api/chatgpt.py
new file mode 100644
index 000000000..039192ec9
--- /dev/null
+++ b/examples/customer_service_bot/bot/api/chatgpt.py
@@ -0,0 +1,77 @@
+"""
+ChatGPT
+-------
+This module defines functions for OpenAI API interaction.
+"""
+import os
+import openai
+
+CHATGPT_MAIN_PROMPT = """
+Pretend that you are a member of staff for a book shop "Book Lovers Paradise".
+Located at 123 Main Street.
+Open seven days a week, from 9 AM to 9 PM.
+Extensive collection of genres, including fiction and non-fiction.
+Knowledgeable staff. Online catalogue for easy browsing and ordering.
+Comfortable seating areas and peaceful atmosphere.
+Refund policy within 30 days of purchase.
+Loyalty program for frequent customers (10% off purchases).
+As a member of staff, you have access to information
+outside of this description, e.g. you can make it up.
+Don't mention your role in your replies, so that they seem natural.
+"""
+
+CHATGPT_QUESTION_PROMPT = """
+What follows is a user query: answer it if you can do so
+as a book shop's member of staff. If the query is unrelated to
+your role, deny it.
+"""
+
+CHATGPT_COHERENCE_PROMPT = """
+What follows is a question and an answer. Just write 'true' if the answer was satisfactory or 'false' otherwise.
+"""
+
+openai.api_key = os.getenv("OPENAI_API_TOKEN")
+
+
+def get_output_factory():
+    """
+    Construct a get_output function encapsulating the execution counter.
+    The function prompts ChatGPT for generated output.
+    Ideally, the main prompt should only be included on the first
+    invocation of the function; as a temporary fix, both prompts are
+    currently sent with every request.
+    """
+
+    def get_output_inner(request: str) -> str:
+        messages = [
+            {"role": "system", "content": CHATGPT_MAIN_PROMPT},
+            {"role": "system", "content": CHATGPT_QUESTION_PROMPT},
+            {"role": "user", "content": request},
+        ]  # temporary fix until a better solution is found
+        get_output_inner.num_calls += 1
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=messages,
+        )
+        return response["choices"][0]["message"]["content"]
+
+    get_output_inner.num_calls = 0
+    return get_output_inner
+
+
+def get_coherence(request: str, response: str) -> str:
+    """
+    Prompt ChatGPT to evaluate the coherence of a request
+    response pair.
+    """
+    completion = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": CHATGPT_COHERENCE_PROMPT},
+            {"role": "user", "content": request},
+            {"role": "assistant", "content": response},
+        ],
+    )
+    return completion["choices"][0]["message"]["content"]
+
+
+get_output = get_output_factory()
diff --git a/examples/customer_service_bot/bot/api/intent_catcher.py b/examples/customer_service_bot/bot/api/intent_catcher.py
new file mode 100644
index 000000000..5a02e2e1d
--- /dev/null
+++ b/examples/customer_service_bot/bot/api/intent_catcher.py
@@ -0,0 +1,27 @@
+"""
+Intent Catcher
+--------------
+This module includes queries to a local intent catcher service.
+"""
+import requests
+from dff.script import Message
+
+
+INTENT_CATCHER_SERVICE = "http://localhost:4999/respond"
+
+
+def get_intents(request: Message):
+    """
+    Query the local intent catcher service, extracting intents from the
+    last user utterance.
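+    The service responds with a nested array of (label, score) pairs; only
+    the top-scoring label is kept, so at most one intent name is returned.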
+ """ + if not request.text: + return [] + request_body = {"dialog_contexts": [request.text]} + try: + response = requests.post(INTENT_CATCHER_SERVICE, json=request_body) + except requests.RequestException: + response = None + if response and response.status_code == 200: + return [response.json()[0][0]] + return [] diff --git a/examples/customer_service_bot/bot/dialog_graph/__init__.py b/examples/customer_service_bot/bot/dialog_graph/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/customer_service_bot/bot/dialog_graph/conditions.py b/examples/customer_service_bot/bot/dialog_graph/conditions.py new file mode 100644 index 000000000..bde5b1942 --- /dev/null +++ b/examples/customer_service_bot/bot/dialog_graph/conditions.py @@ -0,0 +1,39 @@ +""" +Conditions +----------- +This module defines transition conditions. +""" +from typing import Callable + +from dff.script import Context +from dff.pipeline import Pipeline + +from . import consts + + +def has_intent(labels: list) -> Callable: + """ + Check if any of the given intents are in the context. + """ + + def has_intent_inner(ctx: Context, _: Pipeline) -> bool: + if ctx.validation: + return False + + return any([label in ctx.misc.get(consts.INTENTS, []) for label in labels]) + + return has_intent_inner + + +def slots_filled(slots: list) -> Callable: + """ + Check if any of the given slots are filled. + """ + + def slots_filled_inner(ctx: Context, _: Pipeline) -> bool: + if ctx.validation: + return False + + return all([slot in ctx.misc[consts.SLOTS] for slot in slots]) + + return slots_filled_inner diff --git a/examples/customer_service_bot/bot/dialog_graph/consts.py b/examples/customer_service_bot/bot/dialog_graph/consts.py new file mode 100644 index 000000000..bf154a7fd --- /dev/null +++ b/examples/customer_service_bot/bot/dialog_graph/consts.py @@ -0,0 +1,9 @@ +""" +Consts +------ +This module contains constant variables to use in the `Context` object. +""" +SLOTS = "slots" +INTENTS = "caught_intents" +CHATGPT_OUTPUT = "chatgpt_output" +CHATGPT_COHERENCE = "chatgpt_coherence" diff --git a/examples/customer_service_bot/bot/dialog_graph/processing.py b/examples/customer_service_bot/bot/dialog_graph/processing.py new file mode 100644 index 000000000..f9d4475ff --- /dev/null +++ b/examples/customer_service_bot/bot/dialog_graph/processing.py @@ -0,0 +1,123 @@ +""" +Processing +---------- +This module contains processing routines for the customer service +chat bot. +""" +import re +from string import punctuation +from dff.script import Context +from dff.pipeline import Pipeline +from api import chatgpt, intent_catcher +from . import consts + + +def extract_intents(): + """ + Extract intents from intent catcher response. + """ + + def extract_intents_inner(ctx: Context, _: Pipeline) -> Context: + ctx.misc[consts.INTENTS] = intent_catcher.get_intents(ctx.last_request) + return ctx + + return extract_intents_inner + + +def clear_intents(): + """ + Clear intents container. + """ + + def clear_intents_inner(ctx: Context, _: Pipeline) -> Context: + ctx.misc[consts.INTENTS] = [] + return ctx + + return clear_intents_inner + + +def clear_slots(): + """ + Clear slots container. + """ + + def clear_slots_inner(ctx: Context, _: Pipeline) -> Context: + ctx.misc[consts.SLOTS] = {} + return ctx + + return clear_slots_inner + + +def generate_response(): + """ + Store ChatGPT output and ChatGPT coherence measure in the context. 
+ """ + expression = re.compile(r"true", re.IGNORECASE) + + def generate_response_inner(ctx: Context, _: Pipeline) -> Context: + if ctx.validation: + return ctx + + chatgpt_output = chatgpt.get_output(ctx.last_request.text) + ctx.misc[consts.CHATGPT_OUTPUT] = chatgpt_output + coherence_output = chatgpt.get_coherence(ctx.last_request.text, chatgpt_output) + ctx.misc[consts.CHATGPT_COHERENCE] = True if re.search(expression, coherence_output) else False + return ctx + + return generate_response_inner + + +def extract_item(): + """ + Extract item slot. + """ + expression = re.compile(r".+") + + def extract_item_inner(ctx: Context, _: Pipeline) -> Context: + if ctx.validation: + return ctx + + text: str = ctx.last_request.text + search = re.search(expression, text) + if search is not None: + group = search.group() + ctx.misc[consts.SLOTS]["items"] = [item.strip(punctuation) for item in group.split(", ")] + return ctx + + return extract_item_inner + + +def extract_payment_method(): + """Extract payment method slot.""" + expression = re.compile(r"(card|cash)", re.IGNORECASE) + + def extract_payment_method_inner(ctx: Context, _: Pipeline) -> Context: + if ctx.validation: + return ctx + + text: str = ctx.last_request.text + search = re.search(expression, text) + if search is not None: + ctx.misc[consts.SLOTS]["payment_method"] = search.group() + return ctx + + return extract_payment_method_inner + + +def extract_delivery(): + """ + Extract delivery slot. + """ + expression = re.compile(r"(pickup|deliver)", re.IGNORECASE) + + def extract_delivery_inner(ctx: Context, _: Pipeline) -> Context: + if ctx.validation: + return ctx + + text: str = ctx.last_request.text + search = re.search(expression, text) + if search is not None: + ctx.misc[consts.SLOTS]["delivery"] = search.group() + return ctx + + return extract_delivery_inner diff --git a/examples/customer_service_bot/bot/dialog_graph/response.py b/examples/customer_service_bot/bot/dialog_graph/response.py new file mode 100644 index 000000000..c75ca3e4f --- /dev/null +++ b/examples/customer_service_bot/bot/dialog_graph/response.py @@ -0,0 +1,39 @@ +""" +Response +-------- +This module contains response customization functions. +""" +from dff.script import Context, Message +from dff.pipeline import Pipeline + +from . import consts + +FALLBACK_RESPONSE = ( + "I'm afraid I cannot elaborate on this subject. If you have any other questions, feel free to ask them." +) + + +def choose_response(ctx: Context, _: Pipeline) -> Message: + """ + Return ChatGPT response if it is coherent, fall back to + predetermined response otherwise. + """ + if ctx.validation: + return Message() + coherence = ctx.misc[consts.CHATGPT_COHERENCE] + response = ctx.misc[consts.CHATGPT_OUTPUT] + return Message(text=(response if coherence else FALLBACK_RESPONSE)) + + +def confirm(ctx: Context, _: Pipeline) -> Message: + if ctx.validation: + return Message() + msg_text = ( + "We registered your transaction. " + + f"Requested titles are: {', '.join(ctx.misc[consts.SLOTS]['items'])}. " + + f"Delivery method: {ctx.misc[consts.SLOTS]['delivery']}. " + + f"Payment method: {ctx.misc[consts.SLOTS]['payment_method']}. " + + "Type `abort` to cancel, type `ok` to continue." 
+    )
+    msg = Message(text=msg_text)
+    return msg
diff --git a/examples/customer_service_bot/bot/dialog_graph/script.py b/examples/customer_service_bot/bot/dialog_graph/script.py
new file mode 100644
index 000000000..f641da159
--- /dev/null
+++ b/examples/customer_service_bot/bot/dialog_graph/script.py
@@ -0,0 +1,92 @@
+"""
+Script
+--------
+This module defines the bot script.
+"""
+from dff.script import RESPONSE, TRANSITIONS, LOCAL, PRE_TRANSITIONS_PROCESSING, PRE_RESPONSE_PROCESSING
+from dff.script import Message
+from dff.script import conditions as cnd
+from dff.script import labels as lbl
+
+from . import conditions as loc_cnd
+from . import response as loc_rsp
+from . import processing as loc_prc
+
+
+script = {
+    "general_flow": {
+        LOCAL: {
+            TRANSITIONS: {
+                ("form_flow", "ask_item", 1.0): cnd.any(
+                    [loc_cnd.has_intent(["purchase"]), cnd.regexp(r"\border\b|\bpurchase\b")]
+                ),
+                ("chitchat_flow", "init_chitchat", 0.8): cnd.true(),
+            },
+            PRE_TRANSITIONS_PROCESSING: {"1": loc_prc.extract_intents()},
+        },
+        "start_node": {
+            RESPONSE: Message(text=""),
+        },
+        "fallback_node": {
+            RESPONSE: Message(text="Cannot recognize your query. Type 'ok' to continue."),
+        },
+    },
+    "chitchat_flow": {
+        LOCAL: {
+            TRANSITIONS: {
+                ("form_flow", "ask_item", 1.0): cnd.any(
+                    [loc_cnd.has_intent(["purchase"]), cnd.regexp(r"\border\b|\bpurchase\b")]
+                ),
+            },
+            PRE_TRANSITIONS_PROCESSING: {"1": loc_prc.clear_intents(), "2": loc_prc.extract_intents()},
+        },
+        "init_chitchat": {
+            RESPONSE: Message(text="'Book Lovers Paradise' welcomes you! Ask us anything you would like to know."),
+            TRANSITIONS: {("chitchat_flow", "chitchat", 0.8): cnd.true()},
+            PRE_TRANSITIONS_PROCESSING: {"2": loc_prc.clear_slots()},
+        },
+        "chitchat": {
+            PRE_RESPONSE_PROCESSING: {"1": loc_prc.generate_response()},
+            TRANSITIONS: {lbl.repeat(0.8): cnd.true()},  # repeat unless conditions for moving forward are met
+            RESPONSE: loc_rsp.choose_response,
+        },
+    },
+    "form_flow": {
+        LOCAL: {
+            TRANSITIONS: {
+                ("chitchat_flow", "init_chitchat", 1.2): cnd.any(
+                    [cnd.regexp(r"\bcancel\b|\babort\b"), loc_cnd.has_intent(["no"])]
+                ),
+            }
+        },
+        "ask_item": {
+            RESPONSE: Message(
+                text="Which books would you like to order? Separate the titles by commas (type 'abort' to cancel)."
+            ),
+            PRE_TRANSITIONS_PROCESSING: {"1": loc_prc.extract_item()},
+            TRANSITIONS: {("form_flow", "ask_delivery"): loc_cnd.slots_filled(["items"]), lbl.repeat(0.8): cnd.true()},
+        },
+        "ask_delivery": {
+            RESPONSE: Message(
+                text="Which delivery method would you like to use? We currently offer pickup or home delivery."
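+                # extract_delivery() below matches 'pickup' or 'deliver', so
+                # a reply like 'home delivery' fills the slot with 'deliver'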
+            ),
+            PRE_TRANSITIONS_PROCESSING: {"1": loc_prc.extract_delivery()},
+            TRANSITIONS: {
+                ("form_flow", "ask_payment_method"): loc_cnd.slots_filled(["delivery"]),
+                lbl.repeat(0.8): cnd.true(),  # repeat unless conditions for moving forward are met
+            },
+        },
+        "ask_payment_method": {
+            RESPONSE: Message(text="Please, enter the payment method you would like to use: cash or credit card."),
+            PRE_TRANSITIONS_PROCESSING: {"1": loc_prc.extract_payment_method()},
+            TRANSITIONS: {
+                ("form_flow", "success"): loc_cnd.slots_filled(["payment_method"]),
+                lbl.repeat(0.8): cnd.true(),  # repeat unless conditions for moving forward are met
+            },
+        },
+        "success": {
+            RESPONSE: loc_rsp.confirm,
+            TRANSITIONS: {("chitchat_flow", "init_chitchat"): cnd.true()},
+        },
+    },
+}
diff --git a/examples/customer_service_bot/bot/dockerfile b/examples/customer_service_bot/bot/dockerfile
new file mode 100644
index 000000000..ed4a0a4fa
--- /dev/null
+++ b/examples/customer_service_bot/bot/dockerfile
@@ -0,0 +1,10 @@
+# syntax=docker/dockerfile:1
+
+FROM python:3.10-slim-buster
+RUN apt update && apt install -y git
+
+COPY requirements.txt requirements.txt
+RUN pip3 install -r requirements.txt
+
+COPY . .
+CMD ["python3", "run.py"]
diff --git a/examples/customer_service_bot/bot/requirements.txt b/examples/customer_service_bot/bot/requirements.txt
new file mode 100644
index 000000000..b0275c119
--- /dev/null
+++ b/examples/customer_service_bot/bot/requirements.txt
@@ -0,0 +1,6 @@
+dff[telegram, tests] >= 0.6.3
+itsdangerous==2.0.1
+gunicorn==19.9.0
+sentry-sdk[flask]==0.14.1
+healthcheck==1.3.3
+openai==0.27.3
\ No newline at end of file
diff --git a/examples/customer_service_bot/bot/run.py b/examples/customer_service_bot/bot/run.py
new file mode 100644
index 000000000..7e973d5ea
--- /dev/null
+++ b/examples/customer_service_bot/bot/run.py
@@ -0,0 +1,39 @@
+import os
+
+from dff.messengers.telegram import PollingTelegramInterface
+from dff.pipeline import Pipeline
+
+from dialog_graph import script
+
+
+def get_pipeline(use_cli_interface: bool = False) -> Pipeline:
+    telegram_token = os.getenv("TG_BOT_TOKEN")
+    openai_api_token = os.getenv("OPENAI_API_TOKEN")
+
+    if not openai_api_token:
+        raise RuntimeError("The OpenAI API token (`OPENAI_API_TOKEN`) environment variable is required.")
+
+    if use_cli_interface:
+        messenger_interface = None
+    elif telegram_token:
+        messenger_interface = PollingTelegramInterface(token=telegram_token)
+
+    else:
+        raise RuntimeError(
+            "Telegram token (`TG_BOT_TOKEN`) is not set. `TG_BOT_TOKEN` can be set via the `.env` file."
+            " For more info see README.md."
+        )
+
+    pipeline = Pipeline.from_script(
+        script=script.script,
+        start_label=("general_flow", "start_node"),
+        fallback_label=("general_flow", "fallback_node"),
+        messenger_interface=messenger_interface,
+    )
+
+    return pipeline
+
+
+if __name__ == "__main__":
+    pipeline = get_pipeline()
+    pipeline.run()
diff --git a/examples/customer_service_bot/bot/test.py b/examples/customer_service_bot/bot/test.py
new file mode 100644
index 000000000..b9a19660a
--- /dev/null
+++ b/examples/customer_service_bot/bot/test.py
@@ -0,0 +1,45 @@
+import pytest
+from dff.utils.testing.common import check_happy_path
+from dff.messengers.telegram import TelegramMessage
+from dff.script import RESPONSE, Message
+
+from dialog_graph.script import script
+from run import get_pipeline
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "happy_path",
+    [
+        (
+            (TelegramMessage(text="/start"), script["chitchat_flow"]["init_chitchat"][RESPONSE]),
+            (TelegramMessage(text="I need to make an order"), script["form_flow"]["ask_item"][RESPONSE]),
+            (TelegramMessage(text="abort"), script["chitchat_flow"]["init_chitchat"][RESPONSE]),
+            (TelegramMessage(text="I need to make an order"), script["form_flow"]["ask_item"][RESPONSE]),
+            (TelegramMessage(text="'Pale Fire', 'Lolita'"), script["form_flow"]["ask_delivery"][RESPONSE]),
+            (
+                TelegramMessage(text="I want it delivered to my place"),
+                script["form_flow"]["ask_payment_method"][RESPONSE],
+            ),
+            (TelegramMessage(text="abort"), script["chitchat_flow"]["init_chitchat"][RESPONSE]),
+            (TelegramMessage(text="I need to make an order"), script["form_flow"]["ask_item"][RESPONSE]),
+            (TelegramMessage(text="'Pale Fire', 'Lolita'"), script["form_flow"]["ask_delivery"][RESPONSE]),
+            (
+                TelegramMessage(text="I want it delivered to my place"),
+                script["form_flow"]["ask_payment_method"][RESPONSE],
+            ),
+            (TelegramMessage(text="foo bar baz"), script["form_flow"]["ask_payment_method"][RESPONSE]),
+            (
+                TelegramMessage(text="card"),
+                Message(
+                    text="We registered your transaction. Requested titles are: Pale Fire, Lolita. "
+                    "Delivery method: deliver. Payment method: card. "
+                    "Type `abort` to cancel, type `ok` to continue."
+ ), + ), + (TelegramMessage(text="ok"), script["chitchat_flow"]["init_chitchat"][RESPONSE]), + ) + ], +) +async def test_happy_path(happy_path): + check_happy_path(pipeline=get_pipeline(use_cli_interface=True), happy_path=happy_path) diff --git a/examples/customer_service_bot/docker-compose.yml b/examples/customer_service_bot/docker-compose.yml new file mode 100644 index 000000000..93f2261f8 --- /dev/null +++ b/examples/customer_service_bot/docker-compose.yml @@ -0,0 +1,24 @@ +version: "2" +services: + assistant: + env_file: [ .env ] + build: + args: + SERVICE_NAME: assistant + SERVICE_PORT: 5000 + context: bot/ + volumes: + - ./bot/:/app:ro + ports: + - 5000:5000 + + intent_catcher: + env_file: [ .env ] + build: + args: + SERVICE_PORT: 4999 + IC_WEIGHTS: https://huggingface.co/ruthenian8/deeppavlov-intent-catcher-transformers/resolve/main/model.pth.tar + IC_CLASSES: https://huggingface.co/ruthenian8/deeppavlov-intent-catcher-transformers/raw/main/classes.dict + context: ./intent_catcher/ + ports: + - 4999:4999 diff --git a/examples/customer_service_bot/intent_catcher/dockerfile b/examples/customer_service_bot/intent_catcher/dockerfile new file mode 100644 index 000000000..15b0287c8 --- /dev/null +++ b/examples/customer_service_bot/intent_catcher/dockerfile @@ -0,0 +1,31 @@ +# syntax=docker/dockerfile:1 +FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime as base + +RUN apt-get update && \ + apt-get install -y gnupg2 && \ + apt-get install -y curl && \ + apt-get install -y --allow-unauthenticated wget && \ + apt-get install -y git && \ + apt-get install -y unzip && \ + apt-key del 7fa2af80 && \ + rm -f /etc/apt/sources.list.d/cuda*.list && \ + curl https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb \ + -o cuda-keyring_1.0-1_all.deb && \ + dpkg -i cuda-keyring_1.0-1_all.deb + # update cuda keyring (https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/) + +ARG SERVICE_PORT +ENV SERVICE_PORT ${SERVICE_PORT} +ARG IC_WEIGHTS +ARG IC_CLASSES + +WORKDIR /src +COPY ./requirements.txt /src/requirements.txt +RUN pip install -r /src/requirements.txt +RUN wget ${IC_WEIGHTS} +RUN wget ${IC_CLASSES} + +FROM base as prod +WORKDIR /src +COPY . 
+CMD gunicorn --workers=1 server:app -b 0.0.0.0:${SERVICE_PORT} --timeout=1200
diff --git a/examples/customer_service_bot/intent_catcher/requirements.txt b/examples/customer_service_bot/intent_catcher/requirements.txt
new file mode 100644
index 000000000..5c430cebf
--- /dev/null
+++ b/examples/customer_service_bot/intent_catcher/requirements.txt
@@ -0,0 +1,7 @@
+flask==2.2.5
+itsdangerous==2.0.1
+gunicorn==19.9.0
+requests==2.22.0
+sentry-sdk[flask]==0.14.1
+healthcheck==1.3.3
+transformers==4.6.0
diff --git a/examples/customer_service_bot/intent_catcher/server.py b/examples/customer_service_bot/intent_catcher/server.py
new file mode 100644
index 000000000..7d7871960
--- /dev/null
+++ b/examples/customer_service_bot/intent_catcher/server.py
@@ -0,0 +1,85 @@
+import logging
+import time
+import os
+import random
+import csv
+
+import torch
+import sentry_sdk
+from flask import Flask, request, jsonify
+from sentry_sdk.integrations.flask import FlaskIntegration
+from transformers import DistilBertConfig, AutoModelForSequenceClassification, AutoTokenizer, pipeline
+
+
+sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"), integrations=[FlaskIntegration()])
+
+logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+random.seed(42)
+
+DEFAULT_CONFIDENCE = 0.9
+ZERO_CONFIDENCE = 0.0
+MODEL_PATH = "model.pth.tar"
+CLASSES_PATH = "classes.dict"
+
+with open(CLASSES_PATH, "r") as file:
+    reader = csv.reader(file, delimiter="\t")
+    label2id = {line[0]: line[1] for line in reader}
+
+id2label = {value: key for key, value in label2id.items()}
+
+try:
+    no_cuda = not torch.cuda.is_available()
+    model = AutoModelForSequenceClassification.from_config(DistilBertConfig(num_labels=23))
+    state = torch.load(MODEL_PATH, map_location="cpu" if no_cuda else "cuda")
+    model.load_state_dict(state["model_state_dict"])
+    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
+    logger.info("predictor is ready")
+except Exception as e:
+    sentry_sdk.capture_exception(e)
+    logger.exception(e)
+    raise e
+
+app = Flask(__name__)
+logging.getLogger("werkzeug").setLevel("WARNING")
+
+
+@app.route("/respond", methods=["POST"])
+def respond():
+    """
+    The API expects a JSON object with the dialog history passed as an array and labeled 'dialog_contexts'.
+    Intents will be extracted from the last utterance.
+
+    .. code-block:: python
+
+        {
+            "dialog_contexts": ["phrase_1", "phrase_2"]
+        }
+
+    The API responds with a nested array containing 'label - score' pairs.
+
+    .. code-block:: python
+
+        [["definition",0.3393537402153015]]
+
+    """
+    st_time = time.time()
+    contexts = request.json.get("dialog_contexts", [])
+
+    try:
+        results = pipe(contexts)
+        indices = [int("".join(filter(lambda x: x.isdigit(), result["label"]))) for result in results]
+        responses = [list(label2id.keys())[idx] for idx in indices]
+        confidences = [result["score"] for result in results]
+    except Exception as exc:
+        logger.exception(exc)
+        sentry_sdk.capture_exception(exc)
+        responses = [""] * len(contexts)
+        confidences = [ZERO_CONFIDENCE] * len(contexts)
+
+    total_time = time.time() - st_time
+    logger.info(f"Intent catcher exec time: {total_time:.3f}s")
+    return jsonify(list(zip(responses, confidences)))
diff --git a/examples/customer_service_bot/intent_catcher/test_server.py b/examples/customer_service_bot/intent_catcher/test_server.py
new file mode 100644
index 000000000..1bca73f6c
--- /dev/null
+++ b/examples/customer_service_bot/intent_catcher/test_server.py
@@ -0,0 +1,15 @@
+import os
+import requests
+
+
+def test_respond():
+    url = "http://0.0.0.0:{}/respond".format(os.getenv("SERVICE_PORT"))
+
+    contexts = ["I want to order food", "cancel_the_order"]
+    result = requests.post(url, json={"dialog_contexts": contexts}).json()
+    assert all(len(sample[0]) > 0 and sample[1] > 0.0 for sample in result), f"Got\n{result}\n, something is wrong"
+    print("Success")
+
+
+if __name__ == "__main__":
+    test_respond()
diff --git a/examples/frequently_asked_question_bot/.env b/examples/frequently_asked_question_bot/.env
new file mode 100644
index 000000000..6ee31f4c5
--- /dev/null
+++ b/examples/frequently_asked_question_bot/.env
@@ -0,0 +1,6 @@
+POSTGRES_USERNAME=postgres
+POSTGRES_PASSWORD=pass
+POSTGRES_DB=test
+TELEGRAM_TOKEN=***
+INTERFACE=web
+HOST=***
\ No newline at end of file
diff --git a/examples/frequently_asked_question_bot/README.md b/examples/frequently_asked_question_bot/README.md
new file mode 100644
index 000000000..3673a68ce
--- /dev/null
+++ b/examples/frequently_asked_question_bot/README.md
@@ -0,0 +1,45 @@
+## Description
+
+Example FAQ bot built on `dff` with a web interface.
+
+This example serves bot responses either through Telegram or through a website with a chat interface using `WebSockets`. You can choose between the two by setting the `INTERFACE` environment variable to "telegram" or "web", respectively.
+Chat history is stored inside a `postgresql` database.
+
+
+The web interface is accessible via http://localhost:80. If you opt for Telegram,
+the service will power the bot whose token you provide at the configuration stage.
+
+**Note that the Telegram interface requires a webhook, so you will only be able to launch it with an SSL-protected URL, which needs to be passed through the HOST environment variable.**
+
+The bot itself works as follows:
+
+Whenever a user asks a question, the bot uses `clips/mfaq` to find the most similar question in its database and sends the corresponding answer back to the user.
+
+A showcase of the website:
+![faq_web](https://user-images.githubusercontent.com/61429541/233875303-b9bc81c9-522b-4596-8599-6efcfa708d1e.gif)
+
+## Running the project
+
+### Step 1: Configuring docker services
+
+The project services need to be configured with variables that can be set through the [.env](.env) file. Update the file, replacing the placeholders with desired values.
+
+```shell
+POSTGRES_USERNAME=***
+POSTGRES_PASSWORD=***
+POSTGRES_DB=***
+TELEGRAM_TOKEN=***
+INTERFACE=telegram
+# or INTERFACE=web
+# or INTERFACE=cli
+HOST=*** # required for telegram
+```
+
+### Step 2: Launching the docker project
+*The commands below should be run from the /examples/frequently_asked_question_bot directory.*
+
+Launch the project:
+```commandline
+docker compose up --build -d
+```
diff --git a/examples/frequently_asked_question_bot/compose.yml b/examples/frequently_asked_question_bot/compose.yml
new file mode 100644
index 000000000..470af8fdf
--- /dev/null
+++ b/examples/frequently_asked_question_bot/compose.yml
@@ -0,0 +1,38 @@
+version: '3.8'
+
+services:
+  web:
+    build:
+      context: web/
+    volumes:
+      - ./web/:/app:ro
+    ports:
+      - 8000:8000
+    env_file:
+      - ./.env
+    depends_on:
+      db:
+        condition: service_healthy
+  db:
+    env_file: [.env]
+    image: postgres:latest
+    restart: unless-stopped
+    healthcheck:
+      test: pg_isready --username=$${POSTGRES_USERNAME}
+      interval: 4s
+      timeout: 3s
+      retries: 3
+    volumes:
+      - postgres_data:/var/lib/postgresql/data/
+  nginx:
+    image: nginx
+    depends_on:
+      - web
+    ports:
+      - 80:80
+    volumes:
+      - ./nginx.conf:/etc/nginx/nginx.conf:ro
+      - ./web/static:/app/static:ro
+
+volumes:
+  postgres_data:
\ No newline at end of file
diff --git a/examples/frequently_asked_question_bot/nginx.conf b/examples/frequently_asked_question_bot/nginx.conf
new file mode 100644
index 000000000..27e10ee4f
--- /dev/null
+++ b/examples/frequently_asked_question_bot/nginx.conf
@@ -0,0 +1,37 @@
+events {
+    worker_connections 1024;
+}
+
+http {
+    server {
+        listen 80;
+
+        location / {
+            proxy_pass http://web:8000;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_redirect off;
+        }
+
+        location /ws/ {
+            proxy_pass http://web:8000/ws/;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "Upgrade";
+            proxy_set_header Host $host;
+        }
+        location /static/ {
+            alias /app/static/;
+            types {
+                text/html html htm shtml;
+                text/css css;
+                text/xml xml;
+                image/gif gif;
+                image/jpeg jpeg jpg;
+                application/x-javascript js;
+                application/atom+xml atom;
+                application/rss+xml rss;
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/examples/frequently_asked_question_bot/web/Dockerfile b/examples/frequently_asked_question_bot/web/Dockerfile
new file mode 100644
index 000000000..46fb25219
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/Dockerfile
@@ -0,0 +1,17 @@
+# syntax=docker/dockerfile:1
+
+FROM python:3.10-slim-buster
+
+WORKDIR /app
+
+COPY requirements.txt requirements.txt
+RUN pip3 install -r requirements.txt
+
+# cache mfaq model
+RUN ["python3", "-c", "from sentence_transformers import SentenceTransformer; _ = SentenceTransformer('clips/mfaq')"]
+
+COPY . .
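+# run the bot's test suite at build time so that a broken bot fails the image build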
+
+RUN ["pytest", "bot/test.py"]
+
+CMD ["python3", "app.py"]
diff --git a/examples/frequently_asked_question_bot/web/__init__.py b/examples/frequently_asked_question_bot/web/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/frequently_asked_question_bot/web/app.py b/examples/frequently_asked_question_bot/web/app.py
new file mode 100644
index 000000000..3df7a2d48
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/app.py
@@ -0,0 +1,71 @@
+import os
+
+from bot.pipeline import pipeline
+
+import uvicorn
+from telebot import types
+from dff.messengers.telegram.messenger import TelegramMessenger
+from dff.messengers.telegram.interface import extract_telegram_request_and_id
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request
+from fastapi.responses import FileResponse
+from dff.script import Message, Context
+
+HOST = os.getenv("HOST", "0.0.0.0")
+PORT = 8000
+FULL_URI = f"https://{HOST}:{PORT}/telegram"
+telegram_token = os.getenv("TELEGRAM_TOKEN")
+
+app = FastAPI()
+
+
+@app.get("/")
+async def index():
+    return FileResponse("static/index.html", media_type="text/html")
+
+
+@app.websocket("/ws/{client_id}")
+async def websocket_endpoint(websocket: WebSocket, client_id: str):
+    await websocket.accept()
+
+    # store user info in the dialogue context
+    await pipeline.context_storage.set_item_async(
+        client_id, Context(id=client_id, misc={"ip": websocket.client.host, "headers": websocket.headers.raw})
+    )
+
+    async def respond(request: Message):
+        context = await pipeline._run_pipeline(request, client_id)
+        response = context.last_response.text
+        await websocket.send_text(response)
+        return context
+
+    try:
+        await respond(Message())  # display welcome message
+
+        while True:
+            data = await websocket.receive_text()
+            await respond(Message(text=data))
+    except WebSocketDisconnect:  # ignore disconnects
+        pass
+
+
+if telegram_token is not None:
+    messenger = TelegramMessenger(telegram_token)
+    messenger.remove_webhook()
+    messenger.set_webhook(FULL_URI)
+
+    @app.post("/telegram")
+    async def endpoint(request: Request):
+        json_string = (await request.body()).decode("utf-8")
+        update = types.Update.de_json(json_string)
+        message, ctx_id = extract_telegram_request_and_id(update, messenger)
+        # run the pipeline on the current event loop; asyncio.run cannot be
+        # called from inside an already running async endpoint
+        resp = await pipeline._run_pipeline(message, ctx_id)
+        messenger.send_response(resp.id, resp.last_response)
+        return ""
+
+
+if __name__ == "__main__":
+    uvicorn.run(
+        app,
+        host="0.0.0.0",
+        port=PORT,
+    )
diff --git a/examples/frequently_asked_question_bot/web/bot/__init__.py b/examples/frequently_asked_question_bot/web/bot/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/frequently_asked_question_bot/web/bot/dialog_graph/__init__.py b/examples/frequently_asked_question_bot/web/bot/dialog_graph/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/frequently_asked_question_bot/web/bot/dialog_graph/responses.py b/examples/frequently_asked_question_bot/web/bot/dialog_graph/responses.py
new file mode 100644
index 000000000..0adefd447
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/bot/dialog_graph/responses.py
@@ -0,0 +1,63 @@
+"""
+Responses
+---------
+This module defines different responses the bot gives.
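+Responses rely on the 'user_language' and 'similar_question' annotations
+that pre-services attach to the last user request.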
+""" + +from dff.script import Context +from dff.script import Message +from dff.pipeline import Pipeline +from ..faq_model.model import faq, response_translations, request_translations + + +def get_bot_answer(question: str, language: str) -> Message: + """The Message the bot will return as an answer if the most similar question is `question`.""" + index = list(faq.keys()).index(question) + request = list(request_translations[language])[index] + return Message(text=f"Q: {request}\nA: {response_translations[language][index]}") + + +def get_fallback_answer(language: str): + """Fallback answer that the bot returns if user's query is not similar to any of the questions.""" + fallbacks = { + "en": "I don't have an answer to that question. ", + "es": "No tengo una respuesta a esa pregunta. ", + "fr": "Je n'ai pas de réponse à cette question. ", + "de": "Ich habe keine Antwort auf diese Frage. ", + "zh-cn": "我对这个问题没有答案。", + "ru": "У меня нет ответа на этот вопрос. ", + } + + return Message( + text=fallbacks[language], + ) + + +FIRST_MESSAGE = Message( + text="Welcome! Ask me questions about Deeppavlov.\n" + "¡Bienvenido! Hazme preguntas sobre Deeppavlov.\n" + "Bienvenue ! Posez-moi des questions sur Deeppavlov.\n" + "Willkommen! Stellen Sie mir Fragen zu Deeppavlov.\n" + "欢迎!向我询问有关Deeppavlov的问题。\n" + "Добро пожаловать! Задайте мне вопросы о Deeppavlov." +) + +FALLBACK_NODE_MESSAGE = Message(text="Something went wrong.\n" "You may continue asking me questions about Deeppavlov.") + + +def answer_similar_question(ctx: Context, _: Pipeline): + """Answer with the most similar question to user's query.""" + if ctx.validation: # this function requires non-empty fields and cannot be used during script validation + return Message() + last_request = ctx.last_request + language = last_request.annotations["user_language"] + if last_request is None: + raise RuntimeError("No last requests.") + if last_request.annotations is None: + raise RuntimeError("No annotations.") + similar_question = last_request.annotations.get("similar_question") + + if similar_question is None: # question is not similar to any of the questions + return get_fallback_answer(language) + else: + return get_bot_answer(similar_question, language) diff --git a/examples/frequently_asked_question_bot/web/bot/dialog_graph/script.py b/examples/frequently_asked_question_bot/web/bot/dialog_graph/script.py new file mode 100644 index 000000000..8c83b9d0c --- /dev/null +++ b/examples/frequently_asked_question_bot/web/bot/dialog_graph/script.py @@ -0,0 +1,38 @@ +""" +Script +-------- +This module defines a script that the bot follows during conversation. +""" +from dff.script import RESPONSE, TRANSITIONS, GLOBAL, Message +import dff.script.conditions as cnd + +from .responses import answer_similar_question, FIRST_MESSAGE, FALLBACK_NODE_MESSAGE + + +pipeline_kwargs = { + "script": { + GLOBAL: { + TRANSITIONS: { + # an empty message is used to init a dialogue + ("qa_flow", "welcome_node"): cnd.exact_match(Message(), skip_none=False), + ("qa_flow", "answer_question"): cnd.true(), + }, + }, + "qa_flow": { + "welcome_node": { + RESPONSE: FIRST_MESSAGE, + }, + "answer_question": { + RESPONSE: answer_similar_question, + }, + }, + "service_flow": { + "start_node": {}, # this is the start node, it simply redirects to welcome node + "fallback_node": { # this node will only be used if something goes wrong (e.g. 
+                RESPONSE: FALLBACK_NODE_MESSAGE,
+            },
+        },
+    },
+    "start_label": ("service_flow", "start_node"),
+    "fallback_label": ("service_flow", "fallback_node"),
+}
diff --git a/examples/frequently_asked_question_bot/web/bot/faq_model/__init__.py b/examples/frequently_asked_question_bot/web/bot/faq_model/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/frequently_asked_question_bot/web/bot/faq_model/faq_dataset_sample.json b/examples/frequently_asked_question_bot/web/bot/faq_model/faq_dataset_sample.json
new file mode 100644
index 000000000..41b08f614
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/bot/faq_model/faq_dataset_sample.json
@@ -0,0 +1,6 @@
+{
+    "What is Deeppavlov?": "Deeppavlov is an open-source stack of technologies in Conversational AI that facilitate the development of the complex dialog systems.\n Find more info at the official website.",
+    "What can the Deeppavlov library do?": "Deeppavlov is designed for natural language understanding and handles various NLP tasks, like intent recognition or named entity detection.\n A powerful demonstration app is available here.\n",
+    "Why would I want to use Deeppavlov?": "Deeppavlov is the technology behind some of the award-winning solutions for the Amazon Alexa chat bot competition.\n It's employed by the Dream architecture.",
+    "How do I learn more about Deeppavlov?": "Here, you can find the documentation to the latest version of the Deeppavlov library,\n including installation and usage instructions.\n"
+}
\ No newline at end of file
diff --git a/examples/frequently_asked_question_bot/web/bot/faq_model/model.py b/examples/frequently_asked_question_bot/web/bot/faq_model/model.py
new file mode 100644
index 000000000..749886116
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/bot/faq_model/model.py
@@ -0,0 +1,34 @@
+"""
+Model
+-----
+This module defines AI-dependent functions.
+"""
+import json
+from pathlib import Path
+
+import numpy as np
+from sentence_transformers import SentenceTransformer
+
+model = SentenceTransformer("clips/mfaq")
+
+with open(Path(__file__).parent / "request_translations.json", "r", encoding="utf-8") as file:
+    request_translations = json.load(file)
+
+with open(Path(__file__).parent / "response_translations.json", "r", encoding="utf-8") as file:
+    response_translations = json.load(file)
+
+with open(Path(__file__).parent / "faq_dataset_sample.json", "r", encoding="utf-8") as file:
+    faq = json.load(file)
+
+
+def find_similar_question(question: str, lang: str) -> str | None:
+    """Return the most similar question from the faq database."""
+    # the mfaq model expects questions to carry the "<Q>" prefix
+    questions = list(map(lambda x: "<Q>" + x, request_translations[lang]))
+    q_emb, *faq_emb = model.encode(["<Q>" + question] + questions)
+
+    scores = list(map(lambda x: np.linalg.norm(x - q_emb), faq_emb))
+
+    argmin = scores.index(min(scores))
+    if scores[argmin] < 5:
+        return list(faq.keys())[argmin]
+    return None
diff --git a/examples/frequently_asked_question_bot/web/bot/faq_model/request_translations.json b/examples/frequently_asked_question_bot/web/bot/faq_model/request_translations.json
new file mode 100644
index 000000000..f79437681
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/bot/faq_model/request_translations.json
@@ -0,0 +1,38 @@
+{
+    "en": [
+        "What is Deeppavlov?",
+        "What can the Deeppavlov library do?",
+        "Why would I want to use Deeppavlov?",
+        "How do I learn more about Deeppavlov?"
+    ],
+    "es": [
+        "¿Qué es Deeppavlov?",
+        "¿Qué puede hacer la biblioteca Deeppavlov?",
+        "¿Por qué querría usar Deeppavlov?",
+        "¿Cómo aprendo más sobre Deeppavlov?"
+    ],
+    "fr": [
+        "Qu'est-ce que Deeppavlov?",
+        "Que peut faire la bibliothèque Deeppavlov?",
+        "Pourquoi voudrais-je utiliser Deeppavlov?",
+        "Comment en savoir plus sur Deeppavlov?"
+    ],
+    "de": [
+        "Was ist Deeppavlov?",
+        "Was kann die Deeppavlov-Bibliothek tun?",
+        "Warum sollte ich Deeppavlov verwenden wollen?",
+        "Wie erfahre ich mehr über Deeppavlov?"
+    ],
+    "zh-cn": [
+        "什么是Deeppavlov?",
+        "Deeppavlov图书馆能做什么?",
+        "我为什么要使用Deeppavlov?",
+        "我如何了解更多关于Deeppavlov的信息?"
+    ],
+    "ru": [
+        "Что такое Deeppavlov?",
+        "Что может делать библиотека Deeppavlov?",
+        "Зачем мне использовать Deeppavlov?",
+        "Как мне узнать больше о Deeppavlov?"
+    ]
+}
diff --git a/examples/frequently_asked_question_bot/web/bot/faq_model/response_translations.json b/examples/frequently_asked_question_bot/web/bot/faq_model/response_translations.json
new file mode 100644
index 000000000..f5c0c0dc0
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/bot/faq_model/response_translations.json
@@ -0,0 +1,38 @@
+{
+    "en": [
+        "Deeppavlov is an open-source stack of technologies in Conversational AI that facilitate the development of the complex dialog systems.\n Find more info at the official website.",
+        "Deeppavlov is designed for natural language understanding and handles various NLP tasks, like intent recognition or named entity detection.\n A powerful demonstration app is available here.\n",
+        "Deeppavlov is the technology behind some of the award-winning solutions for the Amazon Alexa chat bot competition.\n It's employed by the Dream architecture.",
+        "Here, you can find the documentation to the latest version of the Deeppavlov library,\n including installation and usage instructions.\n"
+    ],
+    "es": [
+        "Deeppavlov es un conjunto de tecnologías de código abierto en Inteligencia Artificial Conversacional que facilitan el desarrollo de sistemas de diálogo complejos.\n Encuentra más información en el sitio web oficial.",
+        "Deeppavlov está diseñado para la comprensión del lenguaje natural y maneja diversas tareas de PLN, como el reconocimiento de intenciones o la detección de entidades nombradas.\n Una potente aplicación de demostración está disponible aquí.\n",
+        "Deeppavlov es la tecnología detrás de algunas de las soluciones ganadoras de premios para la competición de chatbots de Amazon Alexa.\n Está empleada por la arquitectura Dream.",
+        "Aquí, puedes encontrar la documentación de la última versión de la biblioteca Deeppavlov,\n incluyendo instrucciones de instalación y uso.\n"
+    ],
+    "fr": [
+        "Deeppavlov est une pile de technologies open-source en IA Conversationnelle qui facilite le développement de systèmes de dialogues complexes.\n Trouvez plus d'infos sur le site officiel.",
+        "Deeppavlov est conçu pour la compréhension du langage naturel et gère diverses tâches de TAL, comme la reconnaissance d'intentions ou la détection d'entités nommées.\n Une application de démonstration puissante est disponible ici.\n",
+        "Deeppavlov est la technologie derrière certaines des solutions primées pour le concours de chatbot Amazon Alexa.\n Elle est utilisée par l'architecture Dream.",
+        "Ici, vous pouvez trouver la documentation sur la dernière version de la bibliothèque Deeppavlov,\n incluant les instructions d'installation et d'utilisation.\n"
+    ],
+    "de": [
+        "Deeppavlov ist ein Open-Source-Technologiestack in Conversational AI, der die Entwicklung komplexer Dialogsysteme erleichtert.\n Weitere Informationen finden Sie auf der offiziellen Website.",
komplexer Dialogsysteme erleichtert.\n Weitere Informationen finden Sie auf der offiziellen Website.", + "Deeppavlov ist für das Verständnis natürlicher Sprache konzipiert und bewältigt verschiedene NLP-Aufgaben, wie die Erkennung von Absichten oder die Erkennung benannter Entitäten.\n Eine leistungsfähige Demonstrations-App ist hier verfügbar.\n", + "Deeppavlov ist die Technologie hinter einigen der preisgekrönten Lösungen für den Amazon Alexa Chatbot-Wettbewerb.\n Es wird von der Dream-Architektur verwendet.", + "Hier finden Sie die Dokumentation zur neuesten Version der Deeppavlov-Bibliothek,\n einschließlich Installations- und Gebrauchsanweisungen.\n" + ], + "zh-cn": [ + "Deeppavlov是一个开源的对话人工智能技术堆栈,促进了复杂对话系统的开发。\n 在官方网站上找到更多信息。", + "Deeppavlov被设计用于自然语言理解,并处理各种自然语言处理任务,如意图识别或命名实体检测。\n 一个强大的演示应用程序可在这里找到。\n", + "Deeppavlov是亚马逊Alexa聊天机器人比赛中一些获奖解决方案背后的技术。\n 它被Dream架构所采用。", + "这里可以找到Deeppavlov库的最新版本文档,\n 包括安装和使用说明。\n" + ], + "ru": [ + "Deeppavlov - это открытый стек технологий в разговорном ИИ, который облегчает разработку сложных диалоговых систем.\n Больше информации на официальном сайте.", + "Deeppavlov предназначен для понимания естественного языка и обрабатывает различные задачи NLP, такие как распознавание интентов или именованных сущностей.\n Демо библиотеки доступно по ссылке.\n", + "Deeppavlov - это технология, стоящая за некоторыми из призовых решений для соревнований чат-ботов Amazon Alexa.\n Она используется в архитектуре Dream.", + "Здесь вы можете найти документацию к последней версии библиотеки." + ] +} diff --git a/examples/frequently_asked_question_bot/web/bot/pipeline.py b/examples/frequently_asked_question_bot/web/bot/pipeline.py new file mode 100644 index 000000000..fb0ba9ec3 --- /dev/null +++ b/examples/frequently_asked_question_bot/web/bot/pipeline.py @@ -0,0 +1,22 @@ +import os + +from dff.pipeline import Pipeline +from dff.context_storages import context_storage_factory + +from .dialog_graph import script +from .pipeline_services import pre_services + + +db_uri = "postgresql+asyncpg://{}:{}@db:5432/{}".format( + os.getenv("POSTGRES_USERNAME"), + os.getenv("POSTGRES_PASSWORD"), + os.getenv("POSTGRES_DB"), +) +db = context_storage_factory(db_uri) + +pipeline: Pipeline = Pipeline.from_script( + **script.pipeline_kwargs, + context_storage=db, + # pre-services run before bot sends a response + pre_services=pre_services.services, +) diff --git a/examples/frequently_asked_question_bot/web/bot/pipeline_services/__init__.py b/examples/frequently_asked_question_bot/web/bot/pipeline_services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/frequently_asked_question_bot/web/bot/pipeline_services/pre_services.py b/examples/frequently_asked_question_bot/web/bot/pipeline_services/pre_services.py new file mode 100644 index 000000000..f15192065 --- /dev/null +++ b/examples/frequently_asked_question_bot/web/bot/pipeline_services/pre_services.py @@ -0,0 +1,59 @@ +""" +Pre Services +--- +This module defines services that process user requests before script transition. +""" +from dff.script import Context +from langdetect import detect_langs + +from ..faq_model.model import find_similar_question + +PROCESSED_LANGUAGES = ["en", "de", "fr", "es", "ru", "zh-cn"] + + +def language_processor(ctx: Context): + """Store the user language; the language is detected from the last user utterance. + The value can be one of: English, German, Spanish, French, Mandarin Chinese or Russian. 
+ """ + last_request = ctx.last_request + if last_request is None or last_request.text is None: + return + if last_request.annotations is None: + last_request.annotations = {} + else: + if last_request.annotations.get("user_language") is not None: + return + + candidate_languages = detect_langs(last_request.text) + if len(candidate_languages) == 0: + last_request.annotations["user_language"] = "en" + else: + most_probable_language = candidate_languages[0] + if most_probable_language.prob < 0.3: + last_request.annotations["user_language"] = "en" + elif most_probable_language.lang not in PROCESSED_LANGUAGES: + last_request.annotations["user_language"] = "en" + else: + last_request.annotations["user_language"] = most_probable_language.lang + + ctx.last_request = last_request + + +def question_processor(ctx: Context): + """Store the most similar question to user's query in the `annotations` field of a message.""" + last_request = ctx.last_request + if last_request is None or last_request.text is None: + return + if last_request.annotations is None: + last_request.annotations = {} + else: + if last_request.annotations.get("similar_question") is not None: + return + + language = last_request.annotations["user_language"] + last_request.annotations["similar_question"] = find_similar_question(last_request.text, language) + + ctx.last_request = last_request + + +services = [language_processor, question_processor] # pre-services run before bot sends a response diff --git a/examples/frequently_asked_question_bot/web/bot/test.py b/examples/frequently_asked_question_bot/web/bot/test.py new file mode 100644 index 000000000..9e93e1984 --- /dev/null +++ b/examples/frequently_asked_question_bot/web/bot/test.py @@ -0,0 +1,39 @@ +import pytest +from dff.utils.testing.common import check_happy_path +from dff.script import Message +from dff.pipeline import Pipeline + +from .dialog_graph import script +from .pipeline_services import pre_services +from .dialog_graph.responses import get_bot_answer, get_fallback_answer, FIRST_MESSAGE + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "happy_path", + [ + ( + ( + Message(), + FIRST_MESSAGE, + ), + ( + Message(text="Why use Deeppavlov?"), + get_bot_answer("Why would I want to use Deeppavlov?", "en"), + ), + ( + Message(text="What is deeppavlov?"), + get_bot_answer("What is Deeppavlov?", "en"), + ), + ( + Message(text="where am I?"), + get_fallback_answer("en"), + ), + ) + ], +) +async def test_happy_path(happy_path): + check_happy_path( + pipeline=Pipeline.from_script(**script.pipeline_kwargs, pre_services=pre_services.services), + happy_path=happy_path, + ) diff --git a/examples/frequently_asked_question_bot/web/requirements.txt b/examples/frequently_asked_question_bot/web/requirements.txt new file mode 100644 index 000000000..e0ac43786 --- /dev/null +++ b/examples/frequently_asked_question_bot/web/requirements.txt @@ -0,0 +1,6 @@ +dff[tests, postgresql, telegram]>=0.6.3 +sentence_transformers==2.2.2 +uvicorn==0.21.1 +fastapi>=0.95.1 +websockets==11.0.2 +langdetect==1.0.9 \ No newline at end of file diff --git a/examples/frequently_asked_question_bot/web/static/LICENSE.txt b/examples/frequently_asked_question_bot/web/static/LICENSE.txt new file mode 100644 index 000000000..713fa215c --- /dev/null +++ b/examples/frequently_asked_question_bot/web/static/LICENSE.txt @@ -0,0 +1,8 @@ +Copyright (c) 2023 by neil kalman (https://codepen.io/thatkookooguy/pen/VPJpaW) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 
associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/examples/frequently_asked_question_bot/web/static/index.css b/examples/frequently_asked_question_bot/web/static/index.css new file mode 100644 index 000000000..ad782ecc3 --- /dev/null +++ b/examples/frequently_asked_question_bot/web/static/index.css @@ -0,0 +1,254 @@ +@import 'https://fonts.googleapis.com/css?family=Noto+Sans'; +* { + box-sizing: border-box; +} + +body { + background: skyblue; + font: 12px/16px "Noto Sans", sans-serif; +} + +.floating-chat { + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + color: white; + position: fixed; + bottom: 10px; + right: 10px; + width: 40px; + height: 40px; + transform: translateY(70px); + transition: all 250ms ease-out; + border-radius: 50%; + opacity: 0; + background: -moz-linear-gradient(-45deg, #183850 0, #183850 25%, #192C46 50%, #22254C 75%, #22254C 100%); + background: -webkit-linear-gradient(-45deg, #183850 0, #183850 25%, #192C46 50%, #22254C 75%, #22254C 100%); + background-repeat: no-repeat; + background-attachment: fixed; +} +.floating-chat.enter:hover { + box-shadow: 0 10px 20px rgba(0, 0, 0, 0.19), 0 6px 6px rgba(0, 0, 0, 0.23); + opacity: 1; +} +.floating-chat.enter { + transform: translateY(0); + opacity: 0.6; + box-shadow: 0px 1px 3px rgba(0, 0, 0, 0.12), 0px 1px 2px rgba(0, 0, 0, 0.14); +} +.floating-chat.expand { + width: 250px; + max-height: 400px; + height: 400px; + border-radius: 5px; + cursor: auto; + opacity: 1; +} +.floating-chat :focus { + outline: 0; + box-shadow: 0 0 3pt 2pt rgba(14, 200, 121, 0.3); +} +.floating-chat button { + background: transparent; + border: 0; + color: white; + text-transform: uppercase; + border-radius: 3px; + cursor: pointer; +} +.floating-chat .chat { + display: flex; + flex-direction: column; + position: absolute; + opacity: 0; + width: 1px; + height: 1px; + border-radius: 50%; + transition: all 250ms ease-out; + margin: auto; + top: 0; + left: 0; + right: 0; + bottom: 0; +} +.floating-chat .chat.enter { + opacity: 1; + border-radius: 0; + margin: 10px; + width: auto; + height: auto; +} +.floating-chat .chat .header { + flex-shrink: 0; + padding-bottom: 10px; + display: flex; + background: transparent; +} +.floating-chat .chat .header .title { + flex-grow: 1; + flex-shrink: 1; + padding: 0 5px; +} +.floating-chat .chat .header button { + flex-shrink: 0; +} +.floating-chat .chat .messages { + padding: 10px; + margin: 0; + list-style: none; + overflow-y: scroll; + overflow-x: hidden; + flex-grow: 1; + border-radius: 4px; + background: transparent; +} +.floating-chat .chat .messages::-webkit-scrollbar { + width: 5px; +} 
+.floating-chat .chat .messages::-webkit-scrollbar-track { + border-radius: 5px; + background-color: rgba(25, 147, 147, 0.1); +} +.floating-chat .chat .messages::-webkit-scrollbar-thumb { + border-radius: 5px; + background-color: rgba(25, 147, 147, 0.2); +} +.floating-chat .chat .messages li { + position: relative; + clear: both; + display: inline-block; + padding: 14px; + margin: 0 0 20px 0; + font: 12px/16px "Noto Sans", sans-serif; + border-radius: 10px; + background-color: rgba(25, 147, 147, 0.2); + word-wrap: break-word; + max-width: 81%; +} +.floating-chat .chat .messages li:before { + position: absolute; + top: 0; + width: 25px; + height: 25px; + border-radius: 25px; + content: ""; + background-size: cover; +} +.floating-chat .chat .messages li:after { + position: absolute; + top: 10px; + content: ""; + width: 0; + height: 0; + border-top: 10px solid rgba(25, 147, 147, 0.2); +} +.floating-chat .chat .messages li.bot { + animation: show-chat-odd 0.15s 1 ease-in; + -moz-animation: show-chat-odd 0.15s 1 ease-in; + -webkit-animation: show-chat-odd 0.15s 1 ease-in; + float: right; + margin-right: 45px; + color: #0AD5C1; +} +.floating-chat .chat .messages li.bot:before { + right: -45px; + background-image: url(https://thumb.tildacdn.com/tild3665-3130-4938-a265-363663393337/-/resize/264x/-/format/webp/_DeepPavlov_200x200-.png); +} +.floating-chat .chat .messages li.bot:after { + border-right: 10px solid transparent; + right: -10px; +} +.floating-chat .chat .messages li.user { + animation: show-chat-even 0.15s 1 ease-in; + -moz-animation: show-chat-even 0.15s 1 ease-in; + -webkit-animation: show-chat-even 0.15s 1 ease-in; + float: left; + margin-left: 45px; + color: #0EC879; +} +.floating-chat .chat .messages li.user:before { + left: -45px; + background-image: url(https://lens-storage.storage.googleapis.com/png/2fa7d0ae96604dca94fb71f298d31dc8); +} +.floating-chat .chat .messages li.user:after { + border-left: 10px solid transparent; + left: -10px; +} +.floating-chat .chat .footer { + flex-shrink: 0; + display: flex; + padding-top: 10px; + max-height: 90px; + background: transparent; +} +.floating-chat .chat .footer .text-box { + border-radius: 3px; + background: rgba(25, 147, 147, 0.2); + min-height: 100%; + width: 100%; + margin-right: 5px; + color: #0EC879; + overflow-y: auto; + padding: 2px 5px; +} +.floating-chat .chat .footer .text-box::-webkit-scrollbar { + width: 5px; +} +.floating-chat .chat .footer .text-box::-webkit-scrollbar-track { + border-radius: 5px; + background-color: rgba(25, 147, 147, 0.1); +} +.floating-chat .chat .footer .text-box::-webkit-scrollbar-thumb { + border-radius: 5px; + background-color: rgba(25, 147, 147, 0.2); +} + +@keyframes show-chat-even { + 0% { + margin-left: -480px; + } + 100% { + margin-left: 0; + } +} +@-moz-keyframes show-chat-even { + 0% { + margin-left: -480px; + } + 100% { + margin-left: 0; + } +} +@-webkit-keyframes show-chat-even { + 0% { + margin-left: -480px; + } + 100% { + margin-left: 0; + } +} +@keyframes show-chat-odd { + 0% { + margin-right: -480px; + } + 100% { + margin-right: 0; + } +} +@-moz-keyframes show-chat-odd { + 0% { + margin-right: -480px; + } + 100% { + margin-right: 0; + } +} +@-webkit-keyframes show-chat-odd { + 0% { + margin-right: -480px; + } + 100% { + margin-right: 0; + } +} \ No newline at end of file diff --git a/examples/frequently_asked_question_bot/web/static/index.html b/examples/frequently_asked_question_bot/web/static/index.html new file mode 100644 index 000000000..034761dc4 --- /dev/null +++ 
b/examples/frequently_asked_question_bot/web/static/index.html
@@ -0,0 +1,33 @@
[The 33 added lines of markup were lost to HTML tag stripping; only text fragments survive
(page title "Chat", widget header "FAQ Bot"). The file defines the .floating-chat widget
that index.css styles and index.js drives: a header with a close button, a .messages list,
a #messageText text box, a #sendMessage button, and script tags loading jQuery and index.js.]
diff --git a/examples/frequently_asked_question_bot/web/static/index.js b/examples/frequently_asked_question_bot/web/static/index.js
new file mode 100644
index 000000000..32f89e8d3
--- /dev/null
+++ b/examples/frequently_asked_question_bot/web/static/index.js
@@ -0,0 +1,105 @@
+var element = $('.floating-chat');
+var client_id = createUUID();
+
+/*
+The WebSocket URI is computed from the current location (https://stackoverflow.com/a/10418013).
+This may not hold for every deployment; in production it is probably safer to use a
+hardcoded URI, e.g. ws_uri = "ws://example.com/ws/..."
+*/
+var loc = window.location, ws_uri;
+if (loc.protocol === "https:") {
+    ws_uri = "wss:";
+} else {
+    ws_uri = "ws:";
+}
+ws_uri += "//" + loc.host;
+ws_uri += loc.pathname + "ws/" + client_id;
+
+var ws = new WebSocket(ws_uri);
+ws.onmessage = receiveBotMessage;
+
+setTimeout(function() {
+    element.addClass('enter');
+}, 1000);
+
+element.click(openElement);
+
+function openElement() {
+    var messages = element.find('.messages');
+    var textInput = element.find('.text-box');
+    element.find('>i').hide();
+    element.addClass('expand');
+    element.find('.chat').addClass('enter');
+    textInput.keydown(onMetaAndEnter).prop("disabled", false).focus();
+    element.off('click', openElement);
+    element.find('.header button').click(closeElement);
+    element.find('#sendMessage').click(sendNewMessage);
+    messages.scrollTop(messages.prop("scrollHeight"));
+}
+
+function closeElement() {
+    element.find('.chat').removeClass('enter').hide();
+    element.find('>i').show();
+    element.removeClass('expand');
+    element.find('.header button').off('click', closeElement);
+    element.find('#sendMessage').off('click', sendNewMessage);
+    element.find('.text-box').off('keydown', onMetaAndEnter).prop("disabled", true).blur();
+    setTimeout(function() {
+        element.find('.chat').removeClass('enter').show();
+        element.click(openElement);
+    }, 500);
+}
+
+function createUUID() {
+    // RFC 4122 version 4 UUID: http://www.ietf.org/rfc/rfc4122.txt
+    var s = [];
+    var hexDigits = "0123456789abcdef";
+    for (var i = 0; i < 36; i++) {
+        s[i] = hexDigits.substr(Math.floor(Math.random() * 0x10), 1);
+    }
+    s[14] = "4"; // bits 12-15 of the time_hi_and_version field to 0010
+    // parse the hex digit first: masking the raw string yields NaN for digits "a"-"f"
+    s[19] = hexDigits.substr((parseInt(s[19], 16) & 0x3) | 0x8, 1); // bits 6-7 of the clock_seq_hi_and_reserved to 01
+    s[8] = s[13] = s[18] = s[23] = "-";
+
+    var uuid = s.join("");
+    return uuid;
+}
+
+function addMessageToContainer(messageToAdd, type) {
+    var newMessage = messageToAdd;
+
+    if (!newMessage) return;
+
+    var messagesContainer = $('.messages');
+
+    messagesContainer.append([
+        `<li class="${type}">`, // "user" or "bot", matching the li.user / li.bot CSS rules
+        newMessage,
+        '</li>'
+    ].join(''));
+
+    messagesContainer.animate({
+        scrollTop: messagesContainer.prop("scrollHeight")
+    }, 250);
+
+    return newMessage;
+}
+
+function sendNewMessage(event) {
+    var input = document.getElementById("messageText");
+    var result = addMessageToContainer(input.value, "user");
+    ws.send(result);
+    input.value = '';
+    if (event) event.preventDefault(); // do not rely on the deprecated global window.event
+}
+
+function receiveBotMessage(event) {
+    addMessageToContainer(event.data, "bot");
+}
+
+function onMetaAndEnter(event) {
+    if (event.keyCode == 13) {
+        sendNewMessage(event);
+    }
+}
\ No newline at end of file
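Reviewer note: the static client above opens a WebSocket at `{path}ws/{client_id}` and exchanges plain text with the server, but `app.py` itself is not part of this section of the diff. For reference, here is a minimal sketch of how such an endpoint could be wired to the `pipeline` object built in `bot/pipeline.py`, assuming FastAPI and uvicorn (both pinned in `web/requirements.txt`). The `_run_pipeline` call mirrors DFF's web integration examples but is an assumption here, as is the static mount; the project's actual `app.py` may differ.

```python
# app.py: a hypothetical sketch (NOT part of this diff) of the server side
# that index.js talks to. Assumed names are flagged in the comments.
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from dff.script import Message

from bot.pipeline import pipeline  # the Pipeline constructed in bot/pipeline.py

app = FastAPI()


@app.websocket("/ws/{client_id}")
async def websocket_endpoint(websocket: WebSocket, client_id: str):
    """Bridge one browser tab (identified by the UUID from createUUID) to the pipeline."""
    await websocket.accept()
    try:
        while True:
            text = await websocket.receive_text()  # raw message text sent by index.js
            # ASSUMPTION: an awaitable runner in the spirit of DFF's web examples;
            # it feeds one request into the pipeline and returns the updated Context.
            context = await pipeline._run_pipeline(Message(text=text), client_id)
            await websocket.send_text(context.last_response.text)
    except WebSocketDisconnect:
        pass  # the tab was closed; the context survives in the context storage


# Mounted last so that the /ws route above takes precedence over the catch-all mount.
app.mount("/", StaticFiles(directory="static", html=True), name="static")
```

Because the context id is the per-tab UUID, each chat window gets an isolated dialog state, and the Postgres storage created by `context_storage_factory` in `bot/pipeline.py` preserves it across reconnects.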