Skip to content

Commit 1bf88b3

Browse files
committed
update examples to use constants.AT not @
1 parent 45ab787 commit 1bf88b3

File tree

5 files changed

+172
-14
lines changed

5 files changed

+172
-14
lines changed

examples/basic/drug-outcomes.py

Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,141 @@
1+
"""
2+
ADE (Adverse Drug Event) probability estimation task:
3+
4+
Given a pair of (Drug Category, Adverse Event), have the LLM generate an estimate
5+
of the probability that the drug category is associated with an increased risk
6+
of the adverse event.
7+
8+
Run this N times (without caching) to get statistics on the estimates.
9+
Illustrates the use of `llm_response_batch`.
10+
11+
Default model is GPT4o, see how to specify alternative models below.
12+
13+
Example run:
14+
15+
python3 examples/basic/drug-outcomes.py \
16+
--model litellm/claude-3-5-sonnet-20240620 --temp 0.1 \
17+
--pair "(Antibiotics, Acute Liver Injury)" --n 20 --reason true
18+
19+
Interesting models to try:
20+
- gpt-4o (default)
21+
- gpt-4
22+
- litellm/claude-3-5-sonnet-20240620
23+
- groq/llama3-70b-8192
24+
25+
See reference below for specific (DrugCategory, ADE) pairs to test.
26+
27+
References:
28+
- Guides to using Langroid with local and non-OpenAI models:
29+
https://langroid.github.io/langroid/tutorials/local-llm-setup/
30+
https://langroid.github.io/langroid/tutorials/non-openai-llms/
31+
- OMOP Ground Truth table of known Drug-ADE associations:
32+
(see page 16 for the table of Drug-ADE pairs)
33+
https://www.brookings.edu/wp-content/uploads/2012/04/OMOP-methods-review.pdf
34+
"""
35+
36+
import langroid as lr
37+
import langroid.language_models as lm
38+
from langroid.utils.configuration import settings
39+
import numpy as np
40+
import re
41+
from fire import Fire
42+
43+
# Turn off cache retrieval, to get independent estimates on each run
settings.cache = False

# Defaults for the CLI flags below (each overridable via Fire command-line args).
MODEL = lm.OpenAIChatModel.GPT4o  # default chat model
TEMP = 0.1  # LLM sampling temperature
PAIR = "(Antibiotics, Acute Liver Injury)"  # (DrugCategory, AdverseEvent) pair
N = 20  # number of independent estimates to request
# should LLM include reasoning along with probability?
# (meant to test whether including reasoning along with the probability
# improves accuracy and/or variance of estimates)
REASON: bool = False
54+
55+
56+
def extract_num(x: str) -> int:
    """
    Extract the first integer appearing in a string.

    Args:
        x (str): The input string possibly containing a number.

    Returns:
        int: The first run of digits in `x`, as an int; -1 if `x` contains
            no digits. (-1 is a sentinel — callers filter out negative
            values rather than handling an exception.)
    """
    # NOTE: this function never raises; the previous docstring's
    # "Raises: ValueError" claim was incorrect.
    match = re.search(r"\d+", x)
    return int(match.group(0)) if match else -1
74+
75+
76+
def main(
    model: str = MODEL,
    temp: float = TEMP,
    pair: str = PAIR,
    n: int = N,
    reason: bool = REASON,
):
    """
    Request `n` independent probability estimates for a
    (DrugCategory, AdverseEvent) pair and print summary statistics.

    Args:
        model (str): chat model to use (default: GPT4o).
        temp (float): LLM sampling temperature.
        pair (str): the "(DrugCategory, AdverseEvent)" pair to evaluate.
        n (int): number of independent estimates to request.
        reason (bool): if True, instruct the LLM to show brief reasoning
            before its probability estimate.
    """
    # Extra instruction, included in the system message only when `reason` is set.
    REASONING_PROMPT = (
        """
        IMPORTANT: Before showing your estimated probability,
        you MUST show 2-3 sentences with your REASONING, and THEN give your
        percent probability estimate in the range [0,100].
        """
        if reason
        else ""
    )

    agent = lr.ChatAgent(
        lr.ChatAgentConfig(
            llm=lm.OpenAIGPTConfig(
                temperature=temp,
                chat_model=model,
            ),
            name="ADE-Estimator",
            system_message=f"""
            You are a clinician with deep knowledge of Adverse Drug Events (ADEs)
            of various drugs and categories of drugs.
            You will be given a (DRUG CATEGORY, ADVERSE OUTCOME) pair,
            you have to estimate the probability that this DRUG CATEGORY
            is associated with INCREASED RISK of the ADVERSE OUTCOME.

            {REASONING_PROMPT}

            You must give your probability estimate as a SINGLE NUMBER e.g. 56,
            which means 56%.
            DO NOT GIVE A RANGE OF PROBABILITIES, ONLY A SINGLE NUMBER.
            """,
        )
    )

    # Send the same query n times; caching is disabled at module level,
    # so each response is an independent sample.
    results = lr.llm_response_batch(
        agent,
        [pair] * n,
        # ["(Beta Blockers, Mortality after Myocardial Infarction)"]*20,
    )
    probs = [extract_num(r.content) for r in results]
    cached = [r.metadata.cached for r in results]
    n_cached = sum(cached)
    # eliminate negatives (due to errs)
    probs = [p for p in probs if p >= 0]
    if not probs:
        # Guard: with no valid estimates, np.mean would yield nan and
        # max()/min() would raise ValueError; report and bail out instead.
        print(f"No valid probability estimates for {pair} with {model}.")
        return
    mean = np.mean(probs)
    std = np.std(probs)
    std_err = std / np.sqrt(len(probs))
    hi = max(probs)
    lo = min(probs)
    print(f"Stats for {pair} with {model} temp {temp} reason {reason}:")
    print(
        f"N: {len(probs)} ({n_cached} cached ) Mean: {mean:.2f}, Std: {std:.2f}, StdErr:"
        f" {std_err:.2f}, min: {lo:.2f}, max: {hi:.2f}"
    )
    toks, cost = agent.llm.tot_tokens_cost()
    print(f"Tokens: {toks}, Cost: {cost:.2f}")
138+
139+
140+
if __name__ == "__main__":
    # Fire exposes main's keyword args as CLI flags:
    # --model, --temp, --pair, --n, --reason
    Fire(main)

examples/basic/plan-subtasks.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
"""
1717

1818
import langroid as lr
19-
from langroid.utils.constants import NO_ANSWER, DONE
19+
from langroid.utils.constants import NO_ANSWER, DONE, AT
2020

2121
planner = lr.ChatAgent(
2222
lr.ChatAgentConfig(
@@ -39,11 +39,12 @@
3939
* Send Divide operation to `Divider`
4040
4141
To clarify who you are sending the message to, preface your message with
42-
@<helper_name>, e.g. "@Multiplier multiply with 5"
42+
{AT}<helper_name>, e.g. "{AT}Multiplier multiply with 5"
4343
4444
When you have the final answer, say {DONE} and show it.
4545
46-
At the START, ask the user what they need help with, address them as "@user"
46+
At the START, ask the user what they need help with,
47+
address them as "{AT}user"
4748
4849
EXAMPLE:
4950
============
@@ -53,7 +54,7 @@
5354
1. multiply 4 with 5
5455
2. add 1 to the result
5556
3. divide result by 3
56-
@Multiplier multiply 4 with 5
57+
{AT}Multiplier multiply 4 with 5
5758
[... wait for result, then show your NEW PLAN and send a new request]
5859
and so on.
5960
@@ -102,7 +103,8 @@
102103
)
103104

104105

105-
planner_task = lr.Task(planner, interactive=False)
106+
task_config = lr.TaskConfig(addressing_prefix=AT)
107+
planner_task = lr.Task(planner, interactive=False, config=task_config)
106108
adder_task = lr.Task(adder, interactive=False, single_round=True)
107109
multiplier_task = lr.Task(multiplier, interactive=False, single_round=True)
108110
divider_task = lr.Task(divider, interactive=False, single_round=True)

examples/chainlit/chat-search-assistant-local.py

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,26 @@
44
Tested and works ok nous-hermes2-mixtral, but may still have issues.
55
See that script for details.
66
7-
You can specify a local in a few different ways, e.g. `-m local/localhost:8000/v1`
8-
or `-m ollama/mistral` etc. See here how to use Langroid with local LLMs:
7+
You can specify a local model in a few different ways, e.g. `groq/llama3-70b-8192`
8+
or `ollama/mistral` etc. See here how to use Langroid with local LLMs:
99
https://langroid.github.io/langroid/tutorials/local-llm-setup/
1010
11+
Since chainlit does not take cmd line args in the normal way, you have to specify
12+
the model via an environment variable, e.g. `MODEL=ollama/mistral` before the
13+
script is run, e.g.
14+
15+
MODEL=ollama/mistral chainlit run examples/chainlit/chat-search-assistant-local.py
16+
17+
Note - this is just an example of using an open/local LLM;
18+
it does not mean that this will work with ANY local LLM.
19+
20+
You may get good results using `groq/llama3-70b-8192` (see the above-linked guide
21+
to using open/local LLMs with Langroid for more details).
1122
1223
"""
1324

1425
from typing import List, Optional, Type
15-
26+
import os
1627
from dotenv import load_dotenv
1728
from textwrap import dedent
1829

@@ -279,8 +290,8 @@ async def llm_response_async(
279290

280291
@cl.on_chat_start
281292
async def main(
282-
debug: bool = False,
283-
model: str = "ollama/nous-hermes2-mixtral:latest",
293+
debug: bool = True,
294+
model: str = os.getenv("MODEL", "gpt-4o"),
284295
nocache: bool = True,
285296
) -> None:
286297
set_global(

examples/docqa/doc-aware-guide-2.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
3030
from langroid.agent.task import Task
3131
from langroid.utils.configuration import set_global, Settings
32-
from langroid.utils.constants import DONE, NO_ANSWER
32+
from langroid.utils.constants import DONE, NO_ANSWER, AT
3333
from fire import Fire
3434

3535
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@@ -137,9 +137,11 @@ def main(
137137
# recipients=["DocAgent", "User"], default="User"
138138
# )
139139
# guide_agent.enable_message(MyRecipientTool)
140+
task_config = lr.TaskConfig(addressing_prefix=AT)
140141
guide_task = Task(
141142
guide_agent,
142143
interactive=False,
144+
config=task_config,
143145
system_message=f"""
144146
You are VERY HELPFUL GUIDE, who wants to help a User with their inquiry.
145147
@@ -154,7 +156,7 @@ def main(
154156
155157
Since you could be talking to TWO people, in order to CLARIFY who you are
156158
addressing, you MUST ALWAYS EXPLICITLY ADDRESS either the
157-
"User" or the "DocAgent" using @User or @DocAgent, respectively.
159+
"User" or the "DocAgent" using {AT}User or {AT}DocAgent, respectively.
158160
159161
You must THINK like this at each step after receiving a question from the User:
160162

examples/docqa/doc-based-troubleshooting.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
3232
from langroid.agent.task import Task
3333
from langroid.utils.configuration import set_global, Settings
34-
from langroid.utils.constants import DONE, NO_ANSWER
34+
from langroid.utils.constants import DONE, NO_ANSWER, AT
3535
from fire import Fire
3636

3737
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@@ -144,9 +144,11 @@ def main(
144144
# recipients=["DocAgent", "User"], default="User"
145145
# )
146146
# guide_agent.enable_message(MyRecipientTool)
147+
task_config = lr.TaskConfig(addressing_prefix=AT)
147148
guide_task = Task(
148149
guide_agent,
149150
interactive=False,
151+
config=task_config,
150152
system_message=f"""
151153
You are a TROUBLESHOOTER, who wants to help a User with their PROBLEM.
152154
@@ -161,7 +163,7 @@ def main(
161163
162164
Since you could be talking to TWO people, in order to CLARIFY who you are
163165
addressing, you MUST ALWAYS EXPLICITLY ADDRESS either the
164-
"User" or the "DocAgent" using @User or @DocAgent, respectively.
166+
"User" or the "DocAgent" using {AT}User or {AT}DocAgent, respectively.
165167
166168
You must THINK like this at each step after receiving a question from the User:
167169

0 commit comments

Comments
 (0)