Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.adrian.secureagentics.ai/llms.txt

Use this file to discover all available pages before exploring further.

Secureagentics integrates with LangChain through a custom callback handler. The handler extends LangChain’s BaseCallbackHandler and automatically forwards LLM and tool events to Secureagentics whenever your chain or agent runs. You add it once and all activity is captured without modifying your chain logic.

Prerequisites

  • A Secureagentics account with an API key. Find your key in Settings → API Keys.
  • LangChain installed in your Python environment.
pip install langchain langchain-openai requests

Steps

1

Register your LangChain agent

Register the agent with Secureagentics to receive an agent_id. Pass "langchain" as the framework value.
curl --request POST \
  --url https://api.secureagentics.ai/v1/agents \
  --header "Authorization: Bearer YOUR_SECUREAGENTICS_API_KEY" \
  --header "Content-Type: application/json" \
  --data '{
    "name": "my-langchain-agent",
    "framework": "langchain",
    "description": "Research agent built with LangChain"
  }'
Save the id value from the response:
{
  "id": "agt_01hx9z3k2m4p5q6r7s8t9u0v",
  "name": "my-langchain-agent",
  "framework": "langchain",
  "created_at": "2026-05-04T10:00:00Z"
}
2

Create the SecureagenicsCallbackHandler

Create a class that extends BaseCallbackHandler. Each callback method sends the appropriate event type to the Secureagentics events API.
The handler uses a trace_id derived from each run’s run_id (for both LLM and tool runs) so that start and end events for the same invocation are linked in the Secureagentics event log.
python
import os
import requests
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult

class SecureagenicsCallbackHandler(BaseCallbackHandler):
    """LangChain callback handler that forwards events to Secureagentics.

    Attach an instance through a chain's or AgentExecutor's ``callbacks``
    parameter. LangChain then invokes the ``on_*`` hooks below for every
    LLM and tool invocation, and each hook POSTs one event to the
    Secureagentics events API. Each run's ``run_id`` is reused as the
    ``trace_id`` so start and end events of the same invocation are
    linked in the event log.
    """

    def __init__(
        self,
        agent_id: str,
        api_key: str,
        chain_name: Optional[str] = None,
    ):
        """
        Args:
            agent_id: Secureagentics agent id (``agt_...``) returned when
                the agent was registered.
            api_key: Secureagentics API key, sent as a Bearer token.
            chain_name: Optional label copied into every event payload so
                events can be filtered by chain in the dashboard.
        """
        self.agent_id = agent_id
        self.api_key = api_key
        self.chain_name = chain_name
        self._base_url = "https://api.secureagentics.ai/v1"

    def _post_event(self, event_type: str, payload: Dict[str, Any], trace_id: str) -> None:
        """POST one event to the Secureagentics events API.

        Network errors are swallowed deliberately so that telemetry
        problems never interrupt the running agent.
        """
        # Work on a copy of the payload: the previous version injected
        # chain_name into the caller's dict in place, mutating the
        # argument as a side effect.
        event_payload: Dict[str, Any] = dict(payload)
        if self.chain_name:
            event_payload["chain_name"] = self.chain_name

        body: Dict[str, Any] = {
            "type": event_type,
            "trace_id": trace_id,
            "payload": event_payload,
        }

        try:
            requests.post(
                f"{self._base_url}/agents/{self.agent_id}/events",
                headers={
                    "Authorization": f"Bearer {self.api_key}",
                    "Content-Type": "application/json",
                },
                json=body,
                timeout=5,
            )
        except requests.RequestException:
            # Do not let Secureagentics errors interrupt your agent
            pass

    def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        *,
        run_id,
        **kwargs: Any,
    ) -> None:
        """Send a ``prompt`` event when an LLM call begins."""
        self._post_event(
            "prompt",
            {
                # "name" may be absent from the serialized LLM config,
                # in which case the model is reported as "unknown".
                "model": serialized.get("name", "unknown"),
                # Only the first prompt is forwarded; prompt_count
                # records how many prompts the batch actually contained.
                "prompt": prompts[0] if prompts else "",
                "prompt_count": len(prompts),
            },
            trace_id=str(run_id),
        )

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id,
        **kwargs: Any,
    ) -> None:
        """Send a ``completion`` event when an LLM call finishes."""
        generations = response.generations
        response_text = ""
        if generations and generations[0]:
            # First generation of the first prompt.
            response_text = generations[0][0].text

        # llm_output (and token_usage inside it) is provider-specific
        # and may be missing entirely; default to empty usage.
        token_usage = response.llm_output.get("token_usage", {}) if response.llm_output else {}

        self._post_event(
            "completion",
            {
                "response": response_text,
                "token_count": token_usage.get("completion_tokens", 0),
                "total_tokens": token_usage.get("total_tokens", 0),
            },
            trace_id=str(run_id),
        )

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        *,
        run_id,
        **kwargs: Any,
    ) -> None:
        """Send a ``tool_call`` event (status "start") when a tool begins."""
        self._post_event(
            "tool_call",
            {
                "tool": serialized.get("name", "unknown"),
                "input": input_str,
                "status": "start",
            },
            trace_id=str(run_id),
        )

    def on_tool_end(
        self,
        output: str,
        *,
        run_id,
        **kwargs: Any,
    ) -> None:
        """Send a ``tool_call`` event (status "end") when a tool finishes."""
        self._post_event(
            "tool_call",
            {
                "output": output,
                "status": "end",
            },
            trace_id=str(run_id),
        )
3

Attach the handler to your chain or agent

Pass an instance of SecureagenicsCallbackHandler to your chain’s callbacks parameter. The handler runs automatically for every LLM and tool invocation.
python
import os
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.tools import tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# Read Secureagentics credentials from the environment.
SECUREAGENTICS_API_KEY = os.environ["SECUREAGENTICS_API_KEY"]
AGENT_ID = os.environ["SECUREAGENTICS_AGENT_ID"]

# Handler instance that ships every LLM and tool event to Secureagentics.
secureagentics_handler = SecureagenicsCallbackHandler(
    agent_id=AGENT_ID,
    api_key=SECUREAGENTICS_API_KEY,
    chain_name="research-agent",
)

# Sample tool. The docstring doubles as the tool description, so it is
# kept verbatim.
@tool
def get_word_count(text: str) -> int:
    """Return the number of words in the given text."""
    words = text.split()
    return len(words)

# Assemble the agent from model, tools, and prompt.
chat_model = ChatOpenAI(model="gpt-4o", temperature=0)
toolset = [get_word_count]

prompt_messages = [
    ("system", "You are a helpful assistant."),
    ("human", "{input}"),
    MessagesPlaceholder("agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(prompt_messages)

openai_tools_agent = create_openai_tools_agent(chat_model, toolset, prompt)
executor = AgentExecutor(agent=openai_tools_agent, tools=toolset, callbacks=[secureagentics_handler])

# Invoke the agent — the callback handler reports events automatically.
run_result = executor.invoke({"input": "How many words are in 'The quick brown fox'?"})
print(run_result["output"])
Set the chain_name parameter to a meaningful identifier such as "customer-support" or "data-pipeline". The name is included in every event payload, making it easy to filter events by chain in the Secureagentics dashboard.
4

View events in the dashboard

Open the Secureagentics dashboard and go to Agents → your agent name → Events. Each LangChain run appears as a sequence of prompt, completion, and tool_call events grouped by trace_id.

Complete example

import os
import requests
from typing import Any, Dict, List, Optional
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.tools import tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder


class SecureagenicsCallbackHandler(BaseCallbackHandler):
    """LangChain callback handler that forwards events to Secureagentics.

    Each run's ``run_id`` is reused as the ``trace_id`` so start and end
    events of the same invocation are linked in the event log.
    """

    def __init__(self, agent_id: str, api_key: str, chain_name: Optional[str] = None):
        self.agent_id = agent_id          # Secureagentics agent id (agt_...)
        self.api_key = api_key            # sent as a Bearer token
        self.chain_name = chain_name      # optional label added to every payload
        self._base_url = "https://api.secureagentics.ai/v1"

    def _post_event(self, event_type: str, payload: Dict[str, Any], trace_id: str) -> None:
        """POST one event; network errors are swallowed deliberately so
        telemetry problems never interrupt the running agent."""
        # Copy the payload instead of mutating the caller's dict in place.
        event_payload: Dict[str, Any] = dict(payload)
        if self.chain_name:
            event_payload["chain_name"] = self.chain_name
        body: Dict[str, Any] = {
            "type": event_type,
            "trace_id": trace_id,
            "payload": event_payload,
        }
        try:
            requests.post(
                f"{self._base_url}/agents/{self.agent_id}/events",
                headers={
                    "Authorization": f"Bearer {self.api_key}",
                    "Content-Type": "application/json",
                },
                json=body,
                timeout=5,
            )
        except requests.RequestException:
            pass

    def on_llm_start(self, serialized, prompts, *, run_id, **kwargs):
        """Send a ``prompt`` event when an LLM call begins."""
        self._post_event(
            "prompt",
            # Only the first prompt of a batch is forwarded.
            {"model": serialized.get("name", "unknown"), "prompt": prompts[0] if prompts else ""},
            trace_id=str(run_id),
        )

    def on_llm_end(self, response: LLMResult, *, run_id, **kwargs):
        """Send a ``completion`` event when an LLM call finishes."""
        # First generation of the first prompt; llm_output/token_usage are
        # provider-specific and may be missing.
        text = response.generations[0][0].text if response.generations and response.generations[0] else ""
        usage = response.llm_output.get("token_usage", {}) if response.llm_output else {}
        self._post_event(
            "completion",
            {"response": text, "token_count": usage.get("completion_tokens", 0)},
            trace_id=str(run_id),
        )

    def on_tool_start(self, serialized, input_str, *, run_id, **kwargs):
        """Send a ``tool_call`` event (status "start") when a tool begins."""
        self._post_event(
            "tool_call",
            {"tool": serialized.get("name", "unknown"), "input": input_str, "status": "start"},
            trace_id=str(run_id),
        )

    def on_tool_end(self, output, *, run_id, **kwargs):
        """Send a ``tool_call`` event (status "end") when a tool finishes."""
        self._post_event("tool_call", {"output": output, "status": "end"}, trace_id=str(run_id))


# Sample tool. The docstring doubles as the tool description shown to
# the model, so it is kept verbatim.
@tool
def get_word_count(text: str) -> int:
    """Return the number of words in the given text."""
    words = text.split()
    return len(words)


# Handler that forwards every LLM/tool event for this demo run.
handler = SecureagenicsCallbackHandler(
    agent_id=os.environ["SECUREAGENTICS_AGENT_ID"],
    api_key=os.environ["SECUREAGENTICS_API_KEY"],
    chain_name="demo-agent",
)

# Assemble the agent from model, tools, and prompt.
chat_model = ChatOpenAI(model="gpt-4o", temperature=0)
toolset = [get_word_count]

prompt_messages = [
    ("system", "You are a helpful assistant."),
    ("human", "{input}"),
    MessagesPlaceholder("agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(prompt_messages)

openai_tools_agent = create_openai_tools_agent(chat_model, toolset, prompt)
executor = AgentExecutor(agent=openai_tools_agent, tools=toolset, callbacks=[handler])

# Invoke the agent — the callback handler reports events automatically.
run_result = executor.invoke({"input": "How many words are in 'The quick brown fox'?"})
print(run_result["output"])
The _post_event method wraps the HTTP call in a try/except block. This ensures that a network error or a Secureagentics outage never raises an exception inside your LangChain agent.