Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 11 additions & 3 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,9 +1,17 @@
# the model name must be gpt-4o-2024-08-06 as it is dependent on structured output from open ai
MODEL_NAME="gpt-4o-2024-08-06"
# The model name must be 'gpt-4o-2024-08-06' as it is dependent on structured output from OpenAI

LITELLM_LOG="ERROR"

OPENAI_API_KEY=""
# OpenAI's API settings
# Example TogetherAI endpoint
OPENAI_BASE_URL=https://api.together.xyz/v1
OPENAI_API_KEY=

# The model to use for the LLM
MODEL_NAME=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo

# Port for Google Chrome dev mode
CHROME_PORT=9222

# You can skip adding Langfuse API keys. Refer to the README on how to disable tracing with Langfuse.
LANGFUSE_SECRET_KEY="sk-lf-"
Expand Down
21 changes: 10 additions & 11 deletions agentq/core/agent/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def __init__(
output_format: Type[BaseModel],
tools: Optional[List[Tuple[Callable, str]]] = None,
keep_message_history: bool = True,
client: str = "openai",
client: str = None
):
# Metadata
self.agent_name = name
Expand All @@ -44,13 +44,17 @@ def __init__(
litellm.logging = True
litellm.set_verbose = True

# Base URL & Model
base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
api_key = os.getenv("OPENAI_API_KEY", "")

# Llm client
if client == "openai":
if base_url == "https://api.openai.com/v1":
self.client = openai.Client()
elif client == "together":
else:
self.client = openai.OpenAI(
base_url="https://api.together.xyz/v1",
api_key=os.environ["TOGETHER_API_KEY"],
base_url=base_url,
api_key=api_key,
)

self.client = instructor.from_openai(self.client, mode=Mode.JSON)
Expand All @@ -75,8 +79,7 @@ async def run(
input_data: BaseModel,
screenshot: str = None,
session_id: str = None,
# model: str = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
model: str = "gpt-4o-2024-08-06",
model: str = os.getenv("MODEL_NAME", "gpt-4o-mini"),
) -> BaseModel:
if not isinstance(input_data, self.input_format):
raise ValueError(f"Input data must be of type {self.input_format.__name__}")
Expand Down Expand Up @@ -131,10 +134,6 @@ async def run(
if len(self.tools_list) == 0:
response = self.client.chat.completions.create(
model=model,
# model="gpt-4o-2024-08-06",
# model="gpt-4o-mini",
# model="groq/llama3-groq-70b-8192-tool-use-preview",
# model="xlam-1b-fc-r",
messages=self.messages,
response_model=self.output_format,
max_retries=4,
Expand Down
3 changes: 2 additions & 1 deletion agentq/core/web_driver/playwright.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import os
import tempfile
import time
from typing import List, Union
Expand Down Expand Up @@ -190,7 +191,7 @@ async def create_browser_context(self):
)
else:
browser = await PlaywrightManager._playwright.chromium.connect_over_cdp(
"http://localhost:9222"
"http://localhost:{}".format(int(os.getenv("CHROME_PORT", "9222")))
)
PlaywrightManager._browser_context = browser.contexts[0]

Expand Down