import json
import os
from concurrent.futures import ThreadPoolExecutor

import requests
import streamlit as st

# Name of the model registered with TorchServe, read from the environment
MODEL_NAME = os.environ["MODEL_NAME"]

# App title
st.set_page_config(page_title="TorchServe Chatbot")

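# Sidebar: report whether TorchServe and the model's workers are up, and expose
# the generation parameters used for each request.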
with st.sidebar:
    st.title("TorchServe Chatbot")

    st.session_state.model_loaded = False
    try:
        # Liveness check against the TorchServe inference API
        requests.get(url="http://localhost:8080/ping")
        # Ask the management API whether the model's workers are ready
        res = requests.get(url=f"http://localhost:8081/models/{MODEL_NAME}")
        status = "NOT READY"
        if res.status_code == 200:
            status = json.loads(res.text)[0]["workers"][0]["status"]

        if status == "READY":
            st.session_state.model_loaded = True
            st.success("Proceed to entering your prompt message!", icon="👉")
        else:
            st.warning("Model not loaded in TorchServe", icon="⚠️")

    except requests.ConnectionError:
        st.warning("TorchServe is not up. Try again.", icon="⚠️")

    if st.session_state.model_loaded:
        st.success(f"Model loaded: {MODEL_NAME}!", icon="👉")

    st.subheader("Model parameters")
    # Generation parameters sent with every request; concurrent_requests controls
    # how many identical requests are issued in parallel.
    temperature = st.sidebar.slider(
        "temperature", min_value=0.1, max_value=1.0, value=0.5, step=0.1
    )
    top_p = st.sidebar.slider(
        "top_p", min_value=0.1, max_value=1.0, value=0.5, step=0.1
    )
    max_new_tokens = st.sidebar.slider(
        "max_new_tokens", min_value=48, max_value=512, value=50, step=4
    )
    concurrent_requests = st.sidebar.select_slider(
        "concurrent_requests", options=[2**j for j in range(8)]
    )

# Store LLM generated responses
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]

# Display chat messages from history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])


def clear_chat_history():
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]


st.sidebar.button("Clear Chat History", on_click=clear_chat_history)


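# Submit `concurrent_requests` identical streaming POST requests to the TorchServe
# inference endpoint and return the resulting futures.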
def generate_model_response(prompt_input, executor):
    headers = {"Content-type": "application/json", "Accept": "text/plain"}
    url = f"http://127.0.0.1:8080/predictions/{MODEL_NAME}"
    # Prompt plus the generation parameters chosen in the sidebar
    data = json.dumps(
        {
            "prompt": prompt_input,
            "params": {
                "max_new_tokens": max_new_tokens,
                "top_p": top_p,
                "temperature": temperature,
            },
        }
    )
    # stream=True lets the caller consume the generated text as it arrives
    futures = [
        executor.submit(requests.post, url=url, data=data, headers=headers, stream=True)
        for _ in range(concurrent_requests)
    ]

    return futures


# User-provided prompt
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            with ThreadPoolExecutor() as executor:
                futures = generate_model_response(prompt, executor)
                placeholder = st.empty()
                full_response = ""
                # Stream each response chunk by chunk and render the text as it
                # arrives; the output of all concurrent requests is concatenated
                # into a single assistant message.
                for future in futures:
                    response = future.result()
                    for chunk in response.iter_content(chunk_size=None):
                        if chunk:
                            data = chunk.decode("utf-8")
                            full_response += data
                            placeholder.markdown(full_response)
        message = {"role": "assistant", "content": full_response}
        st.session_state.messages.append(message)
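# Example launch (the script name below is illustrative, not fixed by this file):
#   MODEL_NAME=<registered-model-name> streamlit run torchserve_chatbot.py
# with TorchServe already running and the model registered.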