-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathchat_app.py
More file actions
43 lines (31 loc) · 1.17 KB
/
chat_app.py
File metadata and controls
43 lines (31 loc) · 1.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import os
import textwrap

import chainlit as cl
from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

# Load environment variables from the nearest .env file. The HuggingFace Hub
# token must be set or the KeyError below aborts at import time (fail fast
# rather than failing on the first chat message).
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

# Remote Falcon-7B-Instruct endpoint on the HuggingFace Hub: low temperature
# for mostly-deterministic answers, capped at 500 generated tokens per reply.
llm = HuggingFaceHub(
    repo_id="tiiuae/falcon-7b-instruct",
    model_kwargs={"temperature": 0.1, "max_new_tokens": 500},
)

# Minimal QA prompt: each incoming chat message fills the {question} slot.
template = """Answer the following question: {question}
"""
prompt = PromptTemplate(template=template, input_variables=["question"])
# verbose=True logs the fully-formatted prompt on every call — useful while
# iterating on the template.
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
@cl.on_message
async def main(message: str):
    """Handle one incoming chat message.

    Runs the message through the module-level ``llm_chain`` (it becomes the
    ``{question}`` slot of the prompt) and sends the model's answer back to
    the Chainlit UI, wrapped to 100 columns for readability.

    Args:
        message: Raw user message text from the chat interface.
    """
    response = llm_chain.run(message)
    # Soft-wrap long lines; keep the model's own whitespace/newlines and
    # never split a word that happens to exceed the width.
    wrapped_text = textwrap.fill(
        response, width=100, break_long_words=False, replace_whitespace=False
    )
    # wrapped_text is already a str — no f-string wrapper needed.
    await cl.Message(content=wrapped_text).send()