Skip to content

Commit

Permalink
add icons for model pools
Browse files Browse the repository at this point in the history
  • Loading branch information
deep-diver committed Jun 11, 2023
1 parent b4e70ed commit 902e241
Show file tree
Hide file tree
Showing 21 changed files with 342 additions and 36 deletions.
12 changes: 8 additions & 4 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,7 @@ def move_to_second_view(btn):
info['example2'],
info['example3'],
info['example4'],
info['thumb-tiny'],
gr.update(choices=load_mode_list, value=load_mode_list[0]),
"",
)
Expand All @@ -267,6 +268,7 @@ def download_completed(
gen_config_path,
gen_config_sum_path,
load_mode,
thumbnail_tiny,
force_download,
):
tmp_args = types.SimpleNamespace()
Expand All @@ -275,6 +277,7 @@ def download_completed(
tmp_args.gen_config_path = gen_config_path
tmp_args.gen_config_summarization_path = gen_config_sum_path
tmp_args.force_download_ckpt = force_download
tmp_args.thumbnail_tiny = thumbnail_tiny

tmp_args.mode_cpu = True if load_mode == "cpu" else False
tmp_args.mode_mps = True if load_mode == "apple silicon" else False
Expand Down Expand Up @@ -645,7 +648,8 @@ def main(args):
| half precision | load_in_8bit | load_in_4bit |
| ------------------------------ | ------------------------- | ------------------------- |
| {round(7830/1024., 1)}GiB | {round(5224/1024., 1)}GiB | {round(4324/1024., 1)}GiB |
""")
""")
model_thumbnail_tiny = gr.Textbox("", visible=False)

with gr.Column():
gen_config_path = gr.Dropdown(
Expand Down Expand Up @@ -686,7 +690,7 @@ def main(args):
with gr.Tab("Ex4"):
example_showcase4 = gr.Chatbot(
[("hello", "world"), ("damn", "good")]
)
)

with gr.Row():
back_to_model_choose_btn = gr.Button("Back")
Expand Down Expand Up @@ -820,7 +824,7 @@ def main(args):
model_image, model_name, model_params, model_base, model_ckpt,
model_desc, model_vram, gen_config_path,
example_showcase1, example_showcase2, example_showcase3, example_showcase4,
load_mode,
model_thumbnail_tiny, load_mode,
progress_view
]
)
Expand Down Expand Up @@ -885,7 +889,7 @@ def main(args):
lambda: "Start downloading/loading the model...", None, txt_view
).then(
download_completed,
[model_name, model_base, model_ckpt, gen_config_path, gen_config_sum_path, load_mode, force_redownload],
[model_name, model_base, model_ckpt, gen_config_path, gen_config_sum_path, load_mode, model_thumbnail_tiny, force_redownload],
[progress_view2]
).then(
lambda: "Model is fully loaded...", None, txt_view
Expand Down
15 changes: 15 additions & 0 deletions chats/alpaca.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,29 @@

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Render the generation prompt from a copy of the conversation.

    A leading markdown-image token (the model thumbnail prepended to each
    streamed response) is removed from every stored response so the icon
    text is not fed back into the model's prompt. The live *ppmanager* is
    never mutated — all edits happen on a deep copy.
    """
    work_ppm = copy.deepcopy(ppmanager)
    work_ppm.ctx = global_context

    # Drop the first whitespace-separated token when it is an image tag.
    for exchange in work_ppm.pingpongs:
        words = exchange.pong.split(" ")
        if words[0] != "" and pre.contains_image_markdown(words[0]):
            exchange.pong = " ".join(words[1:])

    window = CtxLastWindowStrategy(win_size)
    return window(work_ppm)

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
15 changes: 15 additions & 0 deletions chats/alpaca_gpt4.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,29 @@

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Build the model prompt from a cloned conversation history.

    Each stored response may start with a markdown image (the model
    thumbnail added while streaming); that leading token is stripped
    before the last *win_size* exchanges are rendered with
    *global_context*. The original *ppmanager* is left untouched.
    """
    snapshot = copy.deepcopy(ppmanager)
    snapshot.ctx = global_context

    for pair in snapshot.pingpongs:
        tokens = pair.pong.split(" ")
        head = tokens[0]
        if head != "" and pre.contains_image_markdown(head):
            pair.pong = " ".join(tokens[1:])

    strategy = CtxLastWindowStrategy(win_size)
    return strategy(snapshot)

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
12 changes: 10 additions & 2 deletions chats/alpacoom.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,18 @@

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Build the generation prompt from the conversation history.

    Operates on a deep copy of *ppmanager* so the live conversation state
    is not mutated. Before rendering, any stored response whose first
    whitespace-separated token is a markdown image (the model-thumbnail
    icon prepended during streaming) has that token stripped, so the icon
    markup is not echoed back into the model prompt.

    Args:
        ppmanager: ping-pong manager holding the conversation history.
        user_message: latest user message (unused in this body; kept so all
            chat modules share a uniform build_prompts signature).
        global_context: global/system context string rendered into the prompt.
        win_size: number of most recent exchanges to include.

    Returns:
        The prompt string produced by CtxLastWindowStrategy.
    """
    dummy_ppm = copy.deepcopy(ppmanager)
    dummy_ppm.ctx = global_context

    # Strip a leading thumbnail-image markdown token from each response.
    for pingpong in dummy_ppm.pingpongs:
        pong = pingpong.pong
        first_sentence = pong.split(" ")[0]
        if first_sentence != "" and \
            pre.contains_image_markdown(first_sentence):
            pong = ' '.join(pong.split(" ")[1:])
            pingpong.pong = pong

    # Construct the window strategy once, after stripping; the original
    # also built it before the loop, which was redundant dead work.
    lws = CtxLastWindowStrategy(win_size)
    prompt = lws(dummy_ppm)
    return prompt

Expand Down
16 changes: 16 additions & 0 deletions chats/baize.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,33 @@

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Produce the prompt for generation from a copied history.

    Strips a leading markdown-image token (the streamed model thumbnail)
    from every stored response, then renders the last *win_size* exchanges
    under *global_context*. Works on a deep copy so callers' state is safe.
    """
    clone = copy.deepcopy(ppmanager)
    clone.ctx = global_context

    for entry in clone.pingpongs:
        parts = entry.pong.split(" ")
        if parts[0] != "" and pre.contains_image_markdown(parts[0]):
            entry.pong = " ".join(parts[1:])

    return CtxLastWindowStrategy(win_size)(clone)

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if "[|Human|]" in new_text or \
"[|AI|]" in new_text:
break

if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
15 changes: 15 additions & 0 deletions chats/falcon.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,29 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwa

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Assemble the model prompt from a deep-copied conversation.

    Any response beginning with a markdown image tag (the thumbnail icon
    inserted while streaming) loses that leading token before rendering,
    keeping icon markup out of the prompt text.
    """
    history = copy.deepcopy(ppmanager)
    history.ctx = global_context

    for turn in history.pingpongs:
        pieces = turn.pong.split(" ")
        leading = pieces[0]
        if leading != "" and pre.contains_image_markdown(leading):
            turn.pong = " ".join(pieces[1:])

    renderer = CtxLastWindowStrategy(win_size)
    return renderer(history)

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
22 changes: 18 additions & 4 deletions chats/flan_alpaca.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,29 @@

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Build the generation prompt from the conversation history.

    Works on a deep copy of *ppmanager* so the live conversation state is
    untouched. Any stored response whose first whitespace-separated token
    is a markdown image (the model-thumbnail icon prepended while
    streaming) has that token stripped so the icon is not fed back to the
    model.

    Args:
        ppmanager: ping-pong manager holding the conversation history.
        user_message: latest user message (unused in this body; kept so all
            chat modules share a uniform build_prompts signature).
        global_context: global/system context string rendered into the prompt.
        win_size: number of most recent exchanges to include.

    Returns:
        The prompt string produced by CtxLastWindowStrategy.
    """
    dummy_ppm = copy.deepcopy(ppmanager)
    dummy_ppm.ctx = global_context

    # Strip a leading thumbnail-image markdown token from each response.
    for pingpong in dummy_ppm.pingpongs:
        pong = pingpong.pong
        first_sentence = pong.split(" ")[0]
        if first_sentence != "" and \
            pre.contains_image_markdown(first_sentence):
            pong = ' '.join(pong.split(" ")[1:])
            pingpong.pong = pong

    # Build the prompt only AFTER stripping. The original computed
    # `prompt = lws(dummy_ppm)` before the loop as well, using stale
    # (unstripped) state — premature dead work that is removed here.
    lws = CtxLastWindowStrategy(win_size)
    prompt = lws(dummy_ppm)
    return prompt

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
20 changes: 17 additions & 3 deletions chats/guanaco.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,29 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwa

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Build the generation prompt from the conversation history.

    Operates on a deep copy of *ppmanager* so the caller's conversation
    state is never mutated. Responses that begin with a markdown image
    token (the model-thumbnail icon prepended during streaming) have that
    token removed before the prompt is rendered.

    Args:
        ppmanager: ping-pong manager holding the conversation history.
        user_message: latest user message (unused in this body; kept so all
            chat modules share a uniform build_prompts signature).
        global_context: global/system context string rendered into the prompt.
        win_size: number of most recent exchanges to include.

    Returns:
        The prompt string produced by CtxLastWindowStrategy.
    """
    dummy_ppm = copy.deepcopy(ppmanager)
    dummy_ppm.ctx = global_context

    # Strip a leading thumbnail-image markdown token from each response.
    for pingpong in dummy_ppm.pingpongs:
        pong = pingpong.pong
        first_sentence = pong.split(" ")[0]
        if first_sentence != "" and \
            pre.contains_image_markdown(first_sentence):
            pong = ' '.join(pong.split(" ")[1:])
            pingpong.pong = pong

    # Single strategy construction after stripping; the original also
    # built `lws` and computed `prompt` before the loop using stale,
    # unstripped state — that premature computation is removed.
    lws = CtxLastWindowStrategy(win_size)
    prompt = lws(dummy_ppm)
    return prompt

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
20 changes: 17 additions & 3 deletions chats/koalpaca.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,29 @@

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Build the generation prompt from the conversation history.

    Works on a deep copy of *ppmanager* so live state is untouched. Any
    stored response whose first whitespace-separated token is a markdown
    image (the model-thumbnail icon prepended while streaming) has that
    token stripped before rendering.

    Args:
        ppmanager: ping-pong manager holding the conversation history.
        user_message: latest user message (unused in this body; kept so all
            chat modules share a uniform build_prompts signature).
        global_context: global/system context string rendered into the prompt.
        win_size: number of most recent exchanges to include.

    Returns:
        The prompt string produced by CtxLastWindowStrategy.
    """
    dummy_ppm = copy.deepcopy(ppmanager)
    dummy_ppm.ctx = global_context

    # Strip a leading thumbnail-image markdown token from each response.
    for pingpong in dummy_ppm.pingpongs:
        pong = pingpong.pong
        first_sentence = pong.split(" ")[0]
        if first_sentence != "" and \
            pre.contains_image_markdown(first_sentence):
            pong = ' '.join(pong.split(" ")[1:])
            pingpong.pong = pong

    # Render only after stripping; the original's pre-loop
    # `prompt = lws(dummy_ppm)` on unstripped state was dead work.
    lws = CtxLastWindowStrategy(win_size)
    prompt = lws(dummy_ppm)
    return prompt

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
20 changes: 17 additions & 3 deletions chats/mpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,29 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwa

def build_prompts(ppmanager, user_message, global_context, win_size=3):
    """Build the generation prompt from the conversation history.

    Uses a deep copy of *ppmanager* so the caller's conversation state is
    preserved. Any response beginning with a markdown image token (the
    model-thumbnail icon prepended during streaming) has that token
    removed so icon markup never re-enters the model prompt.

    Args:
        ppmanager: ping-pong manager holding the conversation history.
        user_message: latest user message (unused in this body; kept so all
            chat modules share a uniform build_prompts signature).
        global_context: global/system context string rendered into the prompt.
        win_size: number of most recent exchanges to include.

    Returns:
        The prompt string produced by CtxLastWindowStrategy.
    """
    dummy_ppm = copy.deepcopy(ppmanager)
    dummy_ppm.ctx = global_context

    # Strip a leading thumbnail-image markdown token from each response.
    for pingpong in dummy_ppm.pingpongs:
        pong = pingpong.pong
        first_sentence = pong.split(" ")[0]
        if first_sentence != "" and \
            pre.contains_image_markdown(first_sentence):
            pong = ' '.join(pong.split(" ")[1:])
            pingpong.pong = pong

    # Build once, after stripping; the original duplicated the strategy
    # construction and computed the prompt prematurely before the loop.
    lws = CtxLastWindowStrategy(win_size)
    prompt = lws(dummy_ppm)
    return prompt

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
6 changes: 6 additions & 0 deletions chats/os_stablelm.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,13 @@ def build_prompts(ppmanager, user_message, global_context, win_size=3):
return prompt

def text_stream(ppmanager, streamer):
count = 0

for new_text in streamer:
if count == 0:
ppmanager.append_pong(f"![]({global_vars.model_thumbnail_tiny})***[{global_vars.model_type}]:*** ")
count = count + 1

ppmanager.append_pong(new_text)
yield ppmanager, ppmanager.build_uis()

Expand Down
6 changes: 6 additions & 0 deletions chats/pre.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
import re
import copy
import global_vars
from threading import Thread
from transformers import TextIteratorStreamer
from transformers import GenerationConfig

def contains_image_markdown(string):
    """Return a regex match if *string* contains a markdown image tag
    (``![alt](url)``), otherwise ``None``.

    The result is usable directly in a boolean context, which is how the
    chat modules consume it.
    """
    return re.search(r'!\[(.*?)\]\((.*?)\)', string)

def build_model_inputs(prompt, return_token_type_ids):
model_inputs = global_vars.tokenizer(
[prompt],
Expand Down
Loading

0 comments on commit 902e241

Please sign in to comment.