Commit 82782cd

+ kg-chat
1 parent c0cf2f9 commit 82782cd

10 files changed: +609 −250 lines changed

examples/basic/chat.py

Lines changed: 37 additions & 70 deletions
@@ -8,41 +8,30 @@
 
 Use optional arguments to change the settings, e.g.:
 
--m "local" to use a model served locally at an OpenAI-API-compatible endpoint
-[ Ensure the API endpoint url matches the one in the code below, or edit it. ]
-OR
-- m "litellm/ollama/llama2" to use any model supported by litellm
-(see list here https://docs.litellm.ai/docs/providers)
-[Note you must prepend "litellm/" to the model name required in the litellm docs,
-e.g. "ollama/llama2" becomes "litellm/ollama/llama2",
-"bedrock/anthropic.claude-instant-v1" becomes
-"litellm/bedrock/anthropic.claude-instant-v1"]
-
+-m <local_model_spec>
 -ns # no streaming
 -d # debug mode
 -nc # no cache
--ct momento # use momento cache (instead of redis)
+-sm <system_message>
+-q <initial user msg>
 
-For details on running with local Llama model, see:
-https://langroid.github.io/langroid/blog/2023/09/14/using-langroid-with-local-llms/
+For details on running with local or non-OpenAI models, see:
+https://langroid.github.io/langroid/tutorials/non-openai-llms/
 
 """
 import typer
 from rich import print
 from rich.prompt import Prompt
-from pydantic import BaseSettings
 from dotenv import load_dotenv
 
+import langroid.language_models as lm
 from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
 from langroid.agent.task import Task
-from langroid.language_models.openai_gpt import OpenAIGPTConfig
 from langroid.utils.configuration import set_global, Settings
 from langroid.utils.logging import setup_colored_logging
 
 
 app = typer.Typer()
 
-setup_colored_logging()
-
 # Create classes for non-OpenAI model configs
 
 # OPTION 1: LiteLLM-supported models
@@ -72,27 +61,28 @@
 # of the config so that the `openai.*` completion functions can be used
 # without having to rely on adapter libraries like litellm.
 
-MyLLMConfig = OpenAIGPTConfig.create(prefix="myllm")
-my_llm_config = MyLLMConfig(
-    chat_model="litellm/ollama/llama2",
-    # or, other possibilities for example:
-    # "litellm/bedrock/anthropic.claude-instant-v1"
-    # "litellm/ollama/llama2"
-    # "local/localhost:8000/v1"
-    # "local/localhost:8000"
-    chat_context_length=2048, # adjust based on model
-)
-
-
-class CLIOptions(BaseSettings):
-    model: str = ""
-
-    class Config:
-        extra = "forbid"
-        env_prefix = ""
-
 
-def chat(opts: CLIOptions) -> None:
+@app.command()
+def main(
+    debug: bool = typer.Option(False, "--debug", "-d", help="debug mode"),
+    model: str = typer.Option("", "--model", "-m", help="model name"),
+    no_stream: bool = typer.Option(False, "--nostream", "-ns", help="no streaming"),
+    nocache: bool = typer.Option(False, "--nocache", "-nc", help="don't use cache"),
+    query: str = typer.Option("", "--query", "-q", help="initial user query or msg"),
+    sys_msg: str = typer.Option(
+        "You are a helpful assistant. Be concise in your answers.",
+        "--sysmsg",
+        "-sm",
+        help="system message",
+    ),
+) -> None:
+    set_global(
+        Settings(
+            debug=debug,
+            cache=not nocache,
+            stream=not no_stream,
+        )
+    )
     print(
         """
         [blue]Welcome to the basic chatbot!
@@ -103,19 +93,15 @@ def chat(opts: CLIOptions) -> None:
     load_dotenv()
 
     # use the appropriate config instance depending on model name
-    if opts.model.startswith("litellm/") or opts.model.startswith("local/"):
-        # e.g. litellm/ollama/llama2 or litellm/bedrock/anthropic.claude-instant-v1
-        llm_config = my_llm_config
-        llm_config.chat_model = opts.model
-
-    else:
-        llm_config = OpenAIGPTConfig()
-
-    default_sys_msg = "You are a helpful assistant. Be concise in your answers."
+    llm_config = lm.OpenAIGPTConfig(
+        chat_model=model or lm.OpenAIChatModel.GPT4_TURBO,
+        chat_context_length=4096,
+        timeout=45,
+    )
 
     sys_msg = Prompt.ask(
         "[blue]Tell me who I am. Hit Enter for default, or type your own\n",
-        default=default_sys_msg,
+        default=sys_msg,
    )
 
     config = ChatAgentConfig(
@@ -128,29 +114,10 @@ def chat(opts: CLIOptions) -> None:
     # but in some scenarios, other (e.g. llama) models
     # seem to do better when kicked off with a sys msg and a user msg.
     # In those cases we may want to do task.run("hello") instead.
-    task.run()
-
-
-@app.command()
-def main(
-    debug: bool = typer.Option(False, "--debug", "-d", help="debug mode"),
-    model: str = typer.Option("", "--model", "-m", help="model name"),
-    no_stream: bool = typer.Option(False, "--nostream", "-ns", help="no streaming"),
-    nocache: bool = typer.Option(False, "--nocache", "-nc", help="don't use cache"),
-    cache_type: str = typer.Option(
-        "redis", "--cachetype", "-ct", help="redis or momento"
-    ),
-) -> None:
-    set_global(
-        Settings(
-            debug=debug,
-            cache=not nocache,
-            stream=not no_stream,
-            cache_type=cache_type,
-        )
-    )
-    opts = CLIOptions(model=model)
-    chat(opts)
+    if query:
+        task.run(query)
+    else:
+        task.run()
 
 
 if __name__ == "__main__":
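
Note: after this change the script is driven entirely by the typer options in the updated docstring, e.g. `python3 examples/basic/chat.py -m litellm/ollama/llama2 -q "hello" -ns`. Below is a minimal standalone sketch (not part of this commit) of the same config, agent, and task pattern that the new main() follows; the chat_model string is just one of the example specs mentioned in the diff, and the other values mirror the added lines.

import langroid as lr
import langroid.language_models as lm

# Sketch of the pattern used by the refactored main() above (illustrative, not from the commit).
# chat_model can be an OpenAI model name or a "litellm/..." / "local/..." spec.
llm_config = lm.OpenAIGPTConfig(
    chat_model="litellm/ollama/llama2",  # example spec from the diff; omit to fall back to the default OpenAI model
    chat_context_length=4096,  # adjust based on the model
    timeout=45,
)
agent = lr.ChatAgent(
    lr.ChatAgentConfig(
        llm=llm_config,
        system_message="You are a helpful assistant. Be concise in your answers.",
    )
)
task = lr.Task(agent)
task.run("hello")  # or task.run() to let the user speak first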

examples/basic/fn-call-local-simple.py

Lines changed: 75 additions & 70 deletions
@@ -17,9 +17,14 @@
 
 python3 examples/docqa/fn-call-local-simple.py
 
+To change the local model, use the optional arg -m <local_model>.
+See this [script](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/rag-local-simple.py)
+for other ways to specify the local_model.
+
 """
 import os
 from typing import List
+import fire
 
 from pydantic import BaseModel, Field
 import langroid as lr
@@ -30,44 +35,11 @@
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
-# Create the llm config object.
-# Note: if instead of ollama you've spun up your local LLM to listen at
-# an OpenAI-Compatible Endpoint like `localhost:8000`, then you can set
-# chat_model="local/localhost:8000"; carefully note there's no http in this,
-# and if the endpoint is localhost:8000/v1, then you must set
-# chat_model="local/localhost:8000/v1"
-# Similarly if your endpoint is `http://128.0.4.5:8000/v1`, then you must set
-# chat_model="local/128.0.4.5:8000/v1"
-llm_cfg = lm.OpenAIGPTConfig(
-    chat_model="litellm/ollama/mistral:7b-instruct-v0.2-q4_K_M",
-    chat_context_length=4096, # set this based on model
-    max_output_tokens=100,
-    temperature=0.2,
-    stream=True,
-    timeout=45,
-)
-
-# Recommended: First test if basic chat works with this llm setup as below:
-# Once this works, then you can try the rest of the example.
-#
-# agent = lr.ChatAgent(
-#     lr.ChatAgentConfig(
-#         llm=llm_cfg,
-#     )
-# )
-#
-# agent.llm_response("What is 3 + 4?")
-#
-# task = lr.Task(agent)
-# verify you can interact with this in a chat loop on cmd line:
-# task.run("Concisely answer some questions")
-
 # (1) Define the desired structure via Pydantic.
 # Here we define a nested structure for City information.
 # The "Field" annotations are optional, and are included in the system message
 # if provided, and help with generation accuracy.
 
-
 class CityData(BaseModel):
     population: int = Field(..., description="population of city")
     country: str = Field(..., description="country of city")
@@ -90,9 +62,9 @@ def handle(self) -> str:
         """Handle LLM's structured output if it matches City structure"""
         print("SUCCESS! Got Valid City Info")
         return """
-            Thanks! ask me for another city name, do not say anything else
-            until you get a city name.
-            """
+        Thanks! ask me for another city name, do not say anything else
+        until you get a city name.
+        """
 
     @staticmethod
     def handle_message_fallback(
@@ -101,10 +73,10 @@ def handle_message_fallback(
         """Fallback method when LLM forgets to generate a tool"""
         if isinstance(msg, ChatDocument) and msg.metadata.sender == "LLM":
             return """
-                You must use the `city_tool` to generate city information.
-                You either forgot to use it, or you used it with the wrong format.
-                Make sure all fields are filled out.
-                """
+            You must use the `city_tool` to generate city information.
+            You either forgot to use it, or you used it with the wrong format.
+            Make sure all fields are filled out.
+            """
 
     @classmethod
     def examples(cls) -> List["ToolMessage"]:
@@ -122,33 +94,66 @@ def examples(cls) -> List["ToolMessage"]:
         ]
 
 
-# (3) Define a ChatAgentConfig and ChatAgent
-
-config = lr.ChatAgentConfig(
-    llm=llm_cfg,
-    system_message="""
-    You are an expert on world city information.
-    The user will give you a city name, and you should use the `city_tool` to
-    generate information about the city, and present it to the user.
-    Make up the values if you don't know them exactly, but make sure
-    the structure is as specified in the `city_tool` JSON definition.
-
-    DO NOT SAY ANYTHING ELSE BESIDES PROVIDING THE CITY INFORMATION.
-
-    START BY ASKING ME TO GIVE YOU A CITY NAME.
-    DO NOT GENERATE ANYTHING YOU GET A CITY NAME.
-
-    Once you've generated the city information using `city_tool`,
-    ask for another city name, and so on.
-    """,
-)
-
-agent = lr.ChatAgent(config)
-
-# (4) Enable the Tool for this agent --> this auto-inserts JSON instructions
-# and few-shot examples into the system message
-agent.enable_message(CityTool)
-
-# (5) Create task and run it to start an interactive loop
-task = lr.Task(agent)
-task.run()
+def app(
+    m: str = "litellm/ollama/mistral:7b-instruct-v0.2-q4_K_M",
+):
+    # create LLM config
+    llm_cfg = lm.OpenAIGPTConfig(
+        chat_model= m or "litellm/ollama/mistral:7b-instruct-v0.2-q4_K_M",
+        chat_context_length=4096, # set this based on model
+        max_output_tokens=100,
+        temperature=0.2,
+        stream=True,
+        timeout=45,
+    )
+
+    # Recommended: First test if basic chat works with this llm setup as below:
+    # Once this works, then you can try the rest of the example.
+    #
+    # agent = lr.ChatAgent(
+    #     lr.ChatAgentConfig(
+    #         llm=llm_cfg,
+    #     )
+    # )
+    #
+    # agent.llm_response("What is 3 + 4?")
+    #
+    # task = lr.Task(agent)
+    # verify you can interact with this in a chat loop on cmd line:
+    # task.run("Concisely answer some questions")
+
+
+
+    # Define a ChatAgentConfig and ChatAgent
+
+    config = lr.ChatAgentConfig(
+        llm=llm_cfg,
+        system_message="""
+        You are an expert on world city information.
+        The user will give you a city name, and you should use the `city_tool` to
+        generate information about the city, and present it to the user.
+        Make up the values if you don't know them exactly, but make sure
+        the structure is as specified in the `city_tool` JSON definition.
+
+        DO NOT SAY ANYTHING ELSE BESIDES PROVIDING THE CITY INFORMATION.
+
+        START BY ASKING ME TO GIVE YOU A CITY NAME.
+        DO NOT GENERATE ANYTHING YOU GET A CITY NAME.
+
+        Once you've generated the city information using `city_tool`,
+        ask for another city name, and so on.
+        """,
+    )
+
+    agent = lr.ChatAgent(config)
+
+    # (4) Enable the Tool for this agent --> this auto-inserts JSON instructions
+    # and few-shot examples into the system message
+    agent.enable_message(CityTool)
+
+    # (5) Create task and run it to start an interactive loop
+    task = lr.Task(agent)
+    task.run()
+
+if __name__ == "__main__":
+    fire.Fire(app)
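
For context, this example uses langroid's ToolMessage mechanism: a Pydantic-defined tool class specifies the structure the LLM must emit, handle() runs when the LLM produces a valid instance, handle_message_fallback() nudges the LLM when it forgets the tool, and agent.enable_message(...) auto-inserts the JSON instructions and few-shot examples into the system message (as the comments in the diff note). A minimal sketch of that pattern follows; the SquareTool is illustrative and not part of this commit, and its request/purpose fields follow langroid's usual ToolMessage convention (assumed here, since CityTool's own field declarations fall outside the hunks shown above).

from typing import List

import langroid as lr
from langroid.agent.tool_message import ToolMessage


class SquareTool(ToolMessage):
    # illustrative tool, not from this commit
    request: str = "square_tool"  # name the LLM uses to invoke the tool (assumed convention)
    purpose: str = "To compute the square of a given <number>."
    number: int

    def handle(self) -> str:
        # called when the LLM emits a valid SquareTool instance
        return str(self.number ** 2)

    @classmethod
    def examples(cls) -> List["ToolMessage"]:
        # optional few-shot examples, folded into the system message
        return [cls(number=3)]


agent = lr.ChatAgent(lr.ChatAgentConfig())  # default LLM config; swap in a local llm_cfg as above
agent.enable_message(SquareTool)  # auto-inserts JSON instructions + examples into the system message
task = lr.Task(agent)
# task.run()  # start the interactive loop, as the example above does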
