LLM Agents — Build Autonomous AI Agents

Sanjeev Sharma
4 min read

Advertisement

Introduction

LLM agents combine reasoning with tool use for autonomous task execution. This guide covers building agents that plan, act, and reflect.

Agent Architecture

User Query → Agent Reasoning → Tool Selection & Execution → Observe Results → Iterate or Return Answer

Simple Agent Implementation

from openai import OpenAI
import json

# Module-level client shared by the standalone agent functions below.
client = OpenAI()

def calculator(operation: str, a: float, b: float) -> float:
    """Simple calculator tool."""
    if operation == "add":
        return a + b
    elif operation == "multiply":
        return a * b
    elif operation == "divide":
        return a / b if b != 0 else None
    return None

def weather_lookup(city: str) -> str:
    """Mock weather lookup.

    Returns a canned conditions string for a handful of known cities,
    or "Unknown" for any other city.
    """
    known_conditions = {
        "NYC": "70F, Sunny",
        "LA": "75F, Clear",
        "Chicago": "65F, Cloudy",
    }
    if city in known_conditions:
        return known_conditions[city]
    return "Unknown"

def agent_loop(user_query: str):
    """Run agent loop.

    Sends the query to the model; when the reply mentions a tool by
    keyword, executes that tool and feeds the result back as a user
    message, for at most 5 iterations. Returns the model's final text.

    NOTE(review): tool dispatch is keyword-based and the calculator is
    invoked with hardcoded arguments ("add", 5, 3) regardless of the
    query — fine for a demo, but a real agent should parse the model's
    requested tool call (e.g. via the function-calling API).
    """
    messages = [{"role": "user", "content": user_query}]

    for _ in range(5):  # Max iterations
        response = client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            temperature=0  # deterministic replies make keyword dispatch repeatable
        )

        agent_response = response.choices[0].message.content

        # Check if agent wants to use a tool
        if "calculator" in agent_response.lower():
            # Parse and execute tool
            result = calculator("add", 5, 3)  # demo: arguments are hardcoded
            messages.append({
                "role": "assistant",
                "content": agent_response
            })
            messages.append({
                "role": "user",
                "content": f"Tool result: {result}"
            })
        elif "weather" in agent_response.lower():
            result = weather_lookup("NYC")  # demo: city is hardcoded
            messages.append({
                "role": "assistant",
                "content": agent_response
            })
            messages.append({
                "role": "user",
                "content": f"Weather: {result}"
            })
        else:
            # No tool keyword in the reply: treat it as the final answer.
            return agent_response

    # Iteration cap reached; return the last model reply as-is.
    return agent_response

# Usage
# Example query mixing arithmetic steps; the agent may invoke the
# calculator tool before producing a final answer.
result = agent_loop("What is 5 + 3 times 2?")
print(result)

ReAct (Reasoning + Acting) Agent

def react_agent(task: str):
    """ReAct: Reasoning + Acting agent.

    Prompts the model with a Thought/Action/Observation scaffold and
    returns its raw completion text (single shot; no tool execution).
    """
    system = """You are a reasoning agent. For each step:
1. Thought: What should I do?
2. Action: Use available tools
3. Observation: What did I learn?
4. Continue or Return: Final answer

Available tools: search, calculate, retrieve_data

Format:
Thought: ...
Action: tool_name[argument]
Observation: ...
(repeat as needed)
Final Answer: ..."""

    conversation = [
        {"role": "system", "content": system},
        {"role": "user", "content": task},
    ]
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=conversation,
    )
    return completion.choices[0].message.content

Memory-Based Agent

class MemoryAgent:
    """Agent that injects a rolling window of stored memories into each prompt."""

    def __init__(self, context_window: int = 5):
        """
        Args:
            context_window: How many of the most recent memories to
                include as context. Defaults to 5, matching the
                previous hard-coded behavior.
        """
        self.memory = []
        self.context_window = context_window
        self.client = OpenAI()

    def add_memory(self, memory_type: str, content: str):
        """Store memory for future reference."""
        self.memory.append({
            "type": memory_type,
            "content": content
        })

    def get_context(self) -> str:
        """Get relevant memory context: the most recent entries, one per line."""
        recent = self.memory[-self.context_window:]
        return "\n".join(f"{m['type']}: {m['content']}" for m in recent)

    def process(self, query: str) -> str:
        """Process query with memory.

        Injects recent memories into the system prompt, calls the model,
        records the interaction (answer truncated to 50 chars) as a new
        memory, and returns the model's reply.
        """
        context = self.get_context()

        messages = [
            {"role": "system", "content": f"Remember: {context}"},
            {"role": "user", "content": query}
        ]

        response = self.client.chat.completions.create(
            model="gpt-4",
            messages=messages
        )

        result = response.choices[0].message.content

        # Store interaction in memory
        self.add_memory("interaction", f"Q: {query}, A: {result[:50]}")

        return result

# Usage
agent = MemoryAgent()
# Seed a long-lived preference so it appears in the next prompt's context.
agent.add_memory("preference", "User likes concise answers")
print(agent.process("Explain neural networks"))

Multi-Agent Conversation

class MultiAgentSystem:
    """Registry of named persona agents that can be made to converse."""

    def __init__(self):
        # name -> {"system": persona prompt, "memory": per-agent history
        # (unused by agent_communicate; reserved for multi-turn use)}
        self.agents = {}
        # Own a client instance instead of relying on the module-level
        # global, consistent with MemoryAgent.
        self.client = OpenAI()

    def add_agent(self, name: str, system_prompt: str):
        """Add an agent to the system."""
        self.agents[name] = {
            "system": system_prompt,
            "memory": []
        }

    def _ask(self, agent: str, prompt: str) -> str:
        """Send one user prompt to the named agent; return its reply text."""
        response = self.client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": self.agents[agent]["system"]},
                {"role": "user", "content": prompt}
            ]
        )
        return response.choices[0].message.content

    def agent_communicate(self, agent1: str, agent2: str, query: str):
        """Agents discuss a topic.

        agent1 answers the query, then agent2 reacts to agent1's answer.

        Returns:
            A list of (agent_name, reply_text) tuples, in speaking order.
        """
        reply1 = self._ask(agent1, query)
        reply2 = self._ask(agent2, f"{agent1} said: {reply1}")
        return [(agent1, reply1), (agent2, reply2)]

# Usage
mas = MultiAgentSystem()
mas.add_agent("Optimist", "You see opportunities and positive aspects")
mas.add_agent("Skeptic", "You question everything and point out risks")

# Two-turn exchange: the Optimist answers first, then the Skeptic
# reacts to the Optimist's reply.
result = mas.agent_communicate("Optimist", "Skeptic", "What do you think of AI?")
for agent, response in result:
    print(f"{agent}: {response}\n")

Conclusion

Agents enable autonomous LLM behavior. From simple tool use to complex multi-agent systems, they're powerful for solving real-world problems.

FAQ

Q: Are LLM agents reliable? A: They're improving rapidly but still need supervision for critical tasks.

Q: What's the difference between agents and chatbots? A: Agents use tools and take actions; chatbots respond conversationally.

Q: How do I prevent agent hallucination? A: Use explicit tools, verify outputs, and implement feedback loops.

Advertisement

Sanjeev Sharma

Written by

Sanjeev Sharma

Full Stack Engineer · E-mopro