Commit: Update

Mylinde committed May 1, 2024
1 parent b304fd5 commit fb4fae3

Showing 6 changed files with 91 additions and 66 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -2,7 +2,7 @@

Experience the power of **G**enerative **P**retrained **T**ransformers with a user-friendly interface.

- ## GPT 3.5/4, LLaMA 2/3, Mixtral, openchat
+ ## GPT 3.5, GPT 4, LLaMA 2, LLaMA 3

<strong>NO API KEY REQUIRED</strong>

21 changes: 7 additions & 14 deletions client/js/provider-sort.js
@@ -5,16 +5,13 @@ const modelDisplayNameMapping = {
'gpt-3.5-turbo': 'GPT-3.5 Turbo',
'llama2-70b': 'LLaMA2',
'llama3-70b-instruct': 'LLaMA3',
- 'mixtral-8x7b': 'Mixtral',
- 'openchat_3.5': 'openchat'
};

const providerDisplayNameMapping = {
'g4f.Provider.Auto': 'Auto',
'g4f.Provider.Liaobots': 'Liaobots',
'g4f.Provider.You': 'You',
- 'g4f.Provider.DeepInfra': 'DeepInfra',
- 'g4f.Provider.PerplexityLab': 'Perplexity Labs'
+ 'g4f.Provider.Llama': 'Llama'
};

document.addEventListener('DOMContentLoaded', (event) => {
@@ -30,15 +27,13 @@ function updateModelOptions() {
const selectedProvider = providerSelect.value;

if (selectedProvider === 'g4f.Provider.Auto') {
- availableModels.push('gpt-3.5-turbo', 'gpt-4', 'mixtral-8x7b', 'llama2-70b', 'llama3-70b-instruct', 'openchat_3.5');
+ availableModels.push('gpt-3.5-turbo', 'gpt-4', 'llama2-70b', 'llama3-70b-instruct');
} else if (selectedProvider === 'g4f.Provider.Liaobots') {
availableModels.push('gpt-3.5-turbo', 'gpt-4');
} else if (selectedProvider === 'g4f.Provider.You') {
availableModels.push('gpt-3.5-turbo');
- } else if (selectedProvider === 'g4f.Provider.DeepInfra') {
- availableModels.push('openchat_3.5');
- } else if (selectedProvider === 'g4f.Provider.PerplexityLab') {
- availableModels.push('mixtral-8x7b', 'llama2-70b', 'llama3-70b-instruct', 'gpt-3.5-turbo');
+ } else if (selectedProvider === 'g4f.Provider.Llama') {
+ availableModels.push('llama2-70b', 'llama3-70b-instruct');
}

let modelSelect = document.getElementById('model');
@@ -57,13 +52,11 @@ function updateProviderOptions() {
const selectedModel = modelSelect.value;

if (selectedModel === 'gpt-3.5-turbo') {
- availableProviders.push('g4f.Provider.Auto', 'g4f.Provider.PerplexityLab', 'g4f.Provider.Liaobots', 'g4f.Provider.You', 'g4f.Provider.DeepInfra');
+ availableProviders.push('g4f.Provider.Auto', 'g4f.Provider.Llama', 'g4f.Provider.Liaobots', 'g4f.Provider.You');
} else if (selectedModel === 'gpt-4') {
availableProviders.push('g4f.Provider.Liaobots');
- } else if (selectedModel === 'llama2-70b' || selectedModel === 'llama3-70b-instruct' || selectedModel === 'mixtral-8x7b') {
- availableProviders.push('g4f.Provider.PerplexityLab');
- } else if (selectedModel === 'openchat_3.5') {
- availableProviders.push('g4f.Provider.DeepInfra');
+ } else if (selectedModel === 'llama2-70b' || selectedModel === 'llama3-70b-instruct') {
+ availableProviders.push('g4f.Provider.Llama');
}

let providerSelect = document.getElementById('provider');
2 changes: 1 addition & 1 deletion client/js/provider-sort.min.js

Some generated files are not rendered by default.
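The minified copy is presumably regenerated from provider-sort.js with a standard minifier (for example, terser provider-sort.js -o provider-sort.min.js); the exact tool is an assumption, since the commit records only the regenerated output.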

9 changes: 3 additions & 6 deletions g4f/Provider/Llama.py
@@ -3,15 +3,14 @@
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
- from ..raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class Llama(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.llama2.ai"
working = True
supports_message_history = True
default_model = "meta/llama-3-70b-chat"
default_model = "meta/llama-2-70b-chat"
models = [
"meta/llama-2-7b-chat",
"meta/llama-2-13b-chat",
@@ -72,10 +71,8 @@ async def create_async_generator(
}
started = False
async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
- await raise_for_status(response)
+ response.raise_for_status()
async for chunk in response.content.iter_any():
- if not chunk:
- continue
if not started:
chunk = chunk.lstrip()
started = True
@@ -88,4 +85,4 @@ def format_prompt(messages: Messages):
else message["content"]
for message in messages
]
return "\n".join(messages) + "\n"
return "\n".join(messages) + "\n"
119 changes: 77 additions & 42 deletions g4f/models.py
@@ -1,19 +1,21 @@
from __future__ import annotations

from dataclasses import dataclass
- from .Provider import RetryProvider, ProviderType
- from .Provider import (
- PerplexityLabs,
- GeminiProChat,
- ChatgptNext,
- FreeChatgpt,
+
+ from .Provider import RetryProvider, ProviderType
+ from .Provider import (
ChatgptAi,
+ ChatgptNext,
DeepInfra,
+ GeminiProChat,
+ Koala,
Liaobots,
Llama,
- GptGo,
- You
+ PerplexityLabs,
+ You,
)


@dataclass(unsafe_hash=True)
class Model:
"""
@@ -27,7 +29,7 @@ class Model:
name: str
base_provider: str
best_provider: ProviderType = None

@staticmethod
def __all__() -> list[str]:
"""Returns a list of all model names."""
@@ -36,7 +38,7 @@ def __all__() -> list[str]:
default = Model(
name = "",
base_provider = "",
- best_provider = RetryProvider([ChatgptAi, GptGo, You])
+ best_provider = RetryProvider([ChatgptAi, You])
)

# GPT-3.5 too, but all providers support long requests and responses
@@ -50,7 +52,7 @@ def __all__() -> list[str]:
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
- best_provider = RetryProvider([ChatgptNext, Liaobots, GptGo, You])
+ best_provider = RetryProvider([You, ChatgptNext, Koala])
)

gpt_4 = Model(
@@ -61,45 +63,45 @@ def __all__() -> list[str]:

llama2_7b = Model(
name = "meta-llama/Llama-2-7b-chat-hf",
- base_provider = 'huggingface',
- best_provider = RetryProvider([Llama, PerplexityLabs])
+ base_provider = 'meta',
+ best_provider = RetryProvider([Llama, DeepInfra])
)

llama2_13b = Model(
name = "meta-llama/Llama-2-13b-chat-hf",
- base_provider = 'huggingface',
+ base_provider = 'meta',
best_provider = RetryProvider([Llama, DeepInfra])
)

llama2_70b = Model(
name = "meta-llama/Llama-2-70b-chat-hf",
base_provider = "huggingface",
best_provider = RetryProvider([PerplexityLabs, DeepInfra, Llama])
base_provider = "meta",
best_provider = RetryProvider([Llama, DeepInfra])
)

- llama3_70b_instruct = Model(
- name = "meta-llama/Meta-Llama-3-70b-instruct",
+ llama3_8b_instruct = Model(
+ name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
- best_provider = PerplexityLabs
+ best_provider = RetryProvider([Llama, DeepInfra])
)

- codellama_34b_instruct = Model(
- name = "codellama/CodeLlama-34b-Instruct-hf",
- base_provider = "huggingface",
- best_provider = RetryProvider([PerplexityLabs, DeepInfra])
+ llama3_70b_instruct = Model(
+ name = "meta-llama/Meta-Llama-3-70B-Instruct",
+ base_provider = "meta",
+ best_provider = RetryProvider([Llama, DeepInfra])
)

codellama_70b_instruct = Model(
name = "codellama/CodeLlama-70b-Instruct-hf",
base_provider = "huggingface",
best_provider = DeepInfra
base_provider = "meta",
best_provider = RetryProvider([DeepInfra, PerplexityLabs])
)

# Mistral
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = PerplexityLabs
+ best_provider = RetryProvider([DeepInfra, PerplexityLabs])
)

mistral_7b = Model(
@@ -108,6 +110,18 @@ def __all__() -> list[str]:
best_provider = PerplexityLabs
)

+ mistral_7b_v02 = Model(
+ name = "mistralai/Mistral-7B-Instruct-v0.2",
+ base_provider = "huggingface",
+ best_provider = DeepInfra
+ )

+ mixtral_8x22b = Model(
+ name = "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+ base_provider = "huggingface",
+ best_provider = DeepInfra
+ )

# Misc models
dolphin_mixtral_8x7b = Model(
name = "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
@@ -127,23 +141,23 @@ def __all__() -> list[str]:
best_provider = DeepInfra
)

- airoboros_l2_70b = Model(
- name = "jondurbin/airoboros-l2-70b-gpt4-1.4.1",
- base_provider = "huggingface",
- best_provider = DeepInfra
- )

openchat_35 = Model(
name = "openchat/openchat_3.5",
base_provider = "huggingface",
best_provider = DeepInfra
)

# Bard
- claude_v2 = Model(
- name = 'claude-v2',
+ claude_3_opus = Model(
+ name = 'claude-3-opus',
base_provider = 'anthropic',
- best_provider = FreeChatgpt
+ best_provider = You
)

+ claude_3_sonnet = Model(
+ name = 'claude-3-sonnet',
+ base_provider = 'anthropic',
+ best_provider = You
+ )

gpt_35_turbo_16k = Model(
@@ -185,7 +199,13 @@ def __all__() -> list[str]:
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'google',
- best_provider = RetryProvider([FreeChatgpt, GeminiProChat])
+ best_provider = RetryProvider([GeminiProChat, You])
)

+ dbrx_instruct = Model(
+ name = 'databricks/dbrx-instruct',
+ base_provider = 'mistral',
+ best_provider = RetryProvider([DeepInfra, PerplexityLabs])
+ )

class ModelUtils:
@@ -210,23 +230,38 @@ class ModelUtils:
'gpt-4-32k' : gpt_4_32k,
'gpt-4-32k-0613' : gpt_4_32k_0613,

- # Llama 2
+ # Llama
'llama2-7b' : llama2_7b,
'llama2-13b': llama2_13b,
'llama2-70b': llama2_70b,

+ 'llama3-8b' : llama3_8b_instruct, # alias
+ 'llama3-70b': llama3_70b_instruct, # alias
+ 'llama3-8b-instruct' : llama3_8b_instruct,
'llama3-70b-instruct': llama3_70b_instruct,
- 'codellama-34b-instruct': codellama_34b_instruct,
- 'codellama-70b-instruct': codellama_70b_instruct,

+ 'codellama-70b-instruct': codellama_70b_instruct,

# Mistral Opensource
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b': mistral_7b,
+ 'mistral-7b-v02': mistral_7b_v02,
+ 'mixtral-8x22b': mixtral_8x22b,
'dolphin-mixtral-8x7b': dolphin_mixtral_8x7b,

+ # google gemini
+ 'gemini-pro': gemini_pro,

+ # anthropic
+ 'claude-3-opus': claude_3_opus,
+ 'claude-3-sonnet': claude_3_sonnet,

+ # other
+ 'dbrx-instruct': dbrx_instruct,
'lzlv-70b': lzlv_70b,
'airoboros-70b': airoboros_70b,
- 'airoboros-l2-70b': airoboros_l2_70b,
- 'openchat_3.5': openchat_35,
- 'gemini-pro': gemini_pro,
- 'claude-v2': claude_v2

}

_all_models = list(ModelUtils.convert.keys())
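ModelUtils.convert is the lookup table that maps the short names used by the web client, including the new 'llama3-8b' and 'llama3-70b' aliases, to Model entries; a RetryProvider then tries each listed provider in order until one succeeds. A rough usage sketch, assuming the conventional g4f ChatCompletion API, which this commit does not touch:

    import g4f
    from g4f.models import ModelUtils

    # Resolve a UI alias to its Model definition.
    model = ModelUtils.convert['llama3-70b']
    print(model.name)  # meta-llama/Meta-Llama-3-70B-Instruct

    # RetryProvider([Llama, DeepInfra]) tries Llama first, then DeepInfra.
    response = g4f.ChatCompletion.create(
        model=model.name,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)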
4 changes: 2 additions & 2 deletions gunicorn_config.py
@@ -1,8 +1,8 @@
import os

workers = int(os.environ.get('GUNICORN_PROCESSES', '2'))
- threads = int(os.environ.get('GUNICORN_THREADS', '8'))
- timeout = int(os.environ.get('GUNICORN_TIMEOUT', '10'))
+ threads = int(os.environ.get('GUNICORN_THREADS', '16'))
+ timeout = int(os.environ.get('GUNICORN_TIMEOUT', '30'))
bind = os.environ.get('GUNICORN_BIND', '0.0.0.0:1338')
forwarded_allow_ips = '*'
secure_scheme_headers = { 'X-Forwarded-Proto': 'https' }
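With the new defaults, each of the 2 workers serves 16 threads, so the app can hold roughly 2 x 16 = 32 concurrent requests, and the 30-second timeout gives slow upstream providers more headroom before Gunicorn recycles a worker. All three values remain overridable per deployment, for example GUNICORN_THREADS=8 GUNICORN_TIMEOUT=60 gunicorn -c gunicorn_config.py run:app, where the run:app entrypoint is an assumption rather than something this commit shows.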
