(1)Prepare Your Environment
(1)-1 Activate Your Virtual Environment
# Windows Command Prompt
venv\Scripts\activate.bat
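If you are not on Windows, the equivalent activation command (assuming the environment folder is also named venv) is:
# macOS / Linux
source venv/bin/activate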
(1)-2 Install the Required Library
pip install streamlit --upgrade
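To confirm the installation succeeded, you can print the installed version:
streamlit --version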
(2)Simple Example
if selected == "Chatbot":
    st.title("Hello Bot")
    # Places to find emojis: https://www.webfx.com/tools/emoji-cheat-sheet/
    with st.chat_message("user"):
        st.write("Hello 👋")
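The selected variable is assumed to come from the site navigation menu built earlier in the tutorial. A minimal sketch of that setup, assuming the streamlit-option-menu package (the menu title and page names here are placeholders, not the original code):
import streamlit as st
from streamlit_option_menu import option_menu

# Hypothetical sidebar navigation; "Chatbot" is the page this section builds
with st.sidebar:
    selected = option_menu("Main Menu", ["Home", "Chatbot"])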
(3)EchoBot
if selected == "Chatbot":
    # Build a bot that mirrors your input
    # The bot will respond to your input with the same message
    # st.chat_message: display the user's input
    # st.chat_input: accept user input
    # session_state: store the chat history so we can display it in the chat message container
    # What should we build?
    # (1) Two chat message containers to display messages from the user and the bot, respectively
    # (2) A way to store the chat history so we can display it in the chat message containers
    #     Use a list to store the messages > append to it every time the user or bot sends a message
    #     (Each entry in the list is a dictionary with two keys: role (the author of the message) and content (the message content))
    st.title("Echo Bot")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # Check whether the messages key is in st.session_state.
    # If it's not, initialize it to an empty list.
    # This way we don't overwrite the list every time the app reruns, since we'll keep adding messages to it.

    # Display chat messages from history on app rerun
    # (iterate through the chat history and render each message with its author role and content)
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input (the := operator assigns the user's input to prompt and checks that it's not None in the same line)
    if prompt := st.chat_input("What is up?"):
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # If the user has sent a message, we display it in the chat message container and append it to the chat history.

        response = f"Echo: {prompt}"
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            st.markdown(response)
        # Add assistant response to chat history (display the bot's response and add it to the history)
        st.session_state.messages.append({"role": "assistant", "content": response})
- The session_state feature keeps a record of the conversation.
- It lets your app remember data between user interactions, much like a web browser's memory during a browsing session (a small sketch follows below).
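To see how st.session_state survives reruns, here is a minimal, self-contained sketch (not part of the chatbot; the count key and button label are just illustrations):
import streamlit as st

# Initialize the counter once; the value persists across every rerun of the script
if "count" not in st.session_state:
    st.session_state.count = 0

# Each button click triggers a rerun, but the stored value is kept
if st.button("Increment"):
    st.session_state.count += 1

st.write("Button was clicked", st.session_state.count, "times")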
(4)ChatGPT AI Clone
How?
Instead of echoing the prompt back as the answer, we want the response to come from ChatGPT (the OpenAI API).
(1)Prepare Your Environment
(1)-1 Activate Your Virtual Environment
# Windows Command Prompt
venv\Scripts\activate.bat
(1)-2 Install the Required Library
pip install openai
Reference: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
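You can check which version of the openai package was installed; the code in (4)-5 below uses the pre-1.0 interface, which is what triggers the error fixed in (4)-6:
pip show openai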
(2) Create an OpenAI API key
(2)-1 Log in to the website
Link: https://platform.openai.com/api-keys
(2)-2 Click on Create new secret key
Copy the key and store it somewhere safe
(3) Add OpenAI API key to Streamlit secrets
(3)-1 Make a new folder called .streamlit
(3)-2 In that folder, create a new file named secrets.toml
+ New > Text Document > type in secrets.toml
(3)-3 Store the API key in the secrets.toml file
Assign your API key to a name inside the file, as shown below
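The file contents might look like the following; OPENAI_API_KEY is the name the app reads later, and the value is a placeholder for your own secret key:
# .streamlit/secrets.toml
OPENAI_API_KEY = "sk-..."
Streamlit loads this file automatically, so the key never has to appear in the source code.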
(4) Write the app
(4)-1 Import the library
import openai
(4)-2 Load the API key value
openai.api_key = st.secrets["OPENAI_API_KEY"]
(4)-3 Pick the model we want to use
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
(4)-4 Other code stays the same
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages: #For loop to iterate through the chat history and display each message in the chat message container (with the author role and message content)
with st.chat_message(message["role"]):
st.markdown(message["content"])
#Check to see if the messages key is in st.session_state.
#If it's not, we initialize it to an empty list.
#This is because we'll be adding messages to the list later on, and we don't want to overwrite the list every time the app reruns
# React to user input(:= operator to assign the user's input to the prompt variable and checked if it's not None in the same line)
if prompt := st.chat_input("What is up?"):
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
(4)-5 Tweak the response code
if selected == "Chatbot":
    # Build a ChatGPT-style bot
    # Instead of echoing the prompt, the bot now gets its reply from the OpenAI API
    # st.chat_message: display each message
    # st.chat_input: accept user input
    # session_state: store the chat history so we can display it in the chat message container
    # As before, the chat history is a list of dictionaries with two keys:
    # role (the author of the message) and content (the message content)
    st.title("Cloned ChatGPT-3.5")
    openai.api_key = st.secrets["OPENAI_API_KEY"]
    if "openai_model" not in st.session_state:
        st.session_state["openai_model"] = "gpt-3.5-turbo"
    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # Check whether the messages key is in st.session_state.
    # If it's not, initialize it to an empty list.
    # This way we don't overwrite the list every time the app reruns, since we'll keep adding messages to it.

    # Display chat messages from history on app rerun
    # (iterate through the chat history and render each message with its author role and content)
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input (the := operator assigns the user's input to prompt and checks that it's not None in the same line)
    if prompt := st.chat_input("What is up?"):
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""  # Start full_response as an empty string
            for response in openai.ChatCompletion.create(  # Call the OpenAI API
                model=st.session_state["openai_model"],  # Pass the model saved in session state
                messages=[  # The conversation so far
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=True,  # Stream the reply chunk by chunk to simulate a typing effect and keep the app interactive
            ):
                full_response += response.choices[0].delta.get("content", "")  # Append each streamed chunk to full_response
                message_placeholder.markdown(full_response + "🐇 ")  # Use message_placeholder to show the response so far
            message_placeholder.markdown(full_response)  # Display the full response one more time
        st.session_state.messages.append({"role": "assistant", "content": full_response})  # Add the assistant's reply to the messages list (ensure the response is saved)
(4)-6 Fixing Error
Reference: https://github.com/openai/openai-python/discussions/742
cmd > Run as administrator
wsl --install
wsl
Run wsl to access your WSL environment, then move to the project folder:
cd /mnt/d/스터디/QuickWebsite/
curl -fsSL https://docs.grit.io/install | bash
source /home/[Username]/.bashrc
grit install
grit apply openai
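grit apply openai migrates the code from the pre-1.0 openai package to the new client-based interface. Roughly, the change looks like this (a sketch of the migration for the call used in this app, not the tool's literal output):
# Before (openai < 1.0)
import openai
openai.api_key = st.secrets["OPENAI_API_KEY"]
stream = openai.ChatCompletion.create(
    model=st.session_state["openai_model"],
    messages=st.session_state.messages,
    stream=True,
)

# After (openai >= 1.0)
from openai import OpenAI
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
stream = client.chat.completions.create(
    model=st.session_state["openai_model"],
    messages=st.session_state.messages,
    stream=True,
)
The shape of the streamed chunks also changes: chunk.choices[0].delta is now an object rather than a dictionary, so the content is read with chunk.choices[0].delta.content (which can be None) instead of .delta.get("content", ""). The full listing in (5) reflects this.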
(4)-7 Fixing another error
You also need to add a payment method to your OpenAI account, since API calls are billed
(5) Entire code
from PIL import Image
import requests
import streamlit as st
from openai import OpenAI
if selected == "Chatbot":
    # Build a ChatGPT-style bot
    # Instead of echoing the prompt, the bot gets its reply from the OpenAI API
    # st.chat_message: display each message
    # st.chat_input: accept user input
    # session_state: store the chat history so we can display it in the chat message container
    # The chat history is a list of dictionaries with two keys:
    # role (the author of the message) and content (the message content)
    st.title("Cloned ChatGPT-3.5")
    # Create the OpenAI client with the key stored in Streamlit secrets
    client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
    if "openai_model" not in st.session_state:
        st.session_state["openai_model"] = "gpt-3.5-turbo"
    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # Check whether the messages key is in st.session_state.
    # If it's not, initialize it to an empty list.
    # This way we don't overwrite the list every time the app reruns, since we'll keep adding messages to it.

    # Display chat messages from history on app rerun
    # (iterate through the chat history and render each message with its author role and content)
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input (the := operator assigns the user's input to prompt and checks that it's not None in the same line)
    if prompt := st.chat_input("What is up?"):
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""  # Start full_response as an empty string
            for response in client.chat.completions.create(  # Call the OpenAI API
                model=st.session_state["openai_model"],  # Pass the model saved in session state
                messages=[  # The conversation so far
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=True,  # Stream the reply chunk by chunk to simulate a typing effect and keep the app interactive
            ):
                full_response += response.choices[0].delta.content or ""  # Append each streamed chunk to full_response (delta.content can be None)
                message_placeholder.markdown(full_response + "🐇 ")  # Use message_placeholder to show the response so far
            message_placeholder.markdown(full_response)  # Display the full response one more time
        st.session_state.messages.append({"role": "assistant", "content": full_response})  # Add the assistant's reply to the messages list (ensure the response is saved)
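With everything in place, start the app from the activated virtual environment (assuming the script is saved as, for example, app.py):
streamlit run app.py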
Reference: https://www.youtube.com/watch?v=sBhK-2K9bUc