Use this file to discover all available pages before exploring further.
Long-term memory lets your agent store and recall information across different conversations and sessions.
Unlike short-term memory, which is scoped to a single thread, long-term memory persists across threads and can be recalled at any time. Long-term memory is built on LangGraph stores, which save data as JSON documents organized by namespace and key.
To add long-term memory to an agent, create a store and pass it to create_agent:
InMemoryStore
PostgreSQL
from langchain.agents import create_agent
from langchain_core.runnables import Runnable
from langgraph.store.memory import InMemoryStore

# InMemoryStore saves data to an in-memory dictionary.
# Use a DB-backed store in production.
store = InMemoryStore()

# Passing the store to create_agent makes it available to the
# agent's tools at runtime.
agent: Runnable = create_agent(
    "claude-sonnet-4-6",
    tools=[],
    store=store,
)
LangGraph stores long-term memories as JSON documents in a store. Each memory is organized under a custom namespace (similar to a folder) and a distinct key (like a file name). Namespaces often include user or org IDs or other labels that make it easier to organize information. This structure enables hierarchical organization of memories. Cross-namespace searching is then supported through content filters.
InMemoryStore
PostgreSQL
from collections.abc import Sequence

from langgraph.store.base import IndexConfig
from langgraph.store.memory import InMemoryStore


def embed(texts: Sequence[str]) -> list[list[float]]:
    # Replace with an actual embedding function or LangChain embeddings object
    return [[1.0, 2.0] for _ in texts]


# InMemoryStore saves data to an in-memory dictionary.
# Use a DB-backed store in production.
store = InMemoryStore(index=IndexConfig(embed=embed, dims=2))

user_id = "my-user"
application_context = "chitchat"
namespace = (user_id, application_context)

store.put(
    namespace,
    "a-memory",
    {
        "rules": [
            "User likes short, direct language",
            "User only speaks English & python",
        ],
        "my-key": "my-value",
    },
)

# Get the "memory" by ID
item = store.get(namespace, "a-memory")

# Search for "memories" within this namespace, filtering on content
# equivalence, sorted by vector similarity
items = store.search(
    namespace, filter={"my-key": "my-value"}, query="language preferences"
)
from collections.abc import Sequence

from langgraph.store.base import IndexConfig
from langgraph.store.postgres import PostgresStore  # type: ignore[import-not-found]


def embed(texts: Sequence[str]) -> list[list[float]]:
    # Replace with an actual embedding function or LangChain embeddings object
    return [[1.0, 2.0] for _ in texts]


DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable"

with PostgresStore.from_conn_string(
    DB_URI,
    index=IndexConfig(embed=embed, dims=2),  # type: ignore[arg-type]
) as store:
    # Create the required tables/indexes on first use.
    store.setup()

    user_id = "my-user"
    application_context = "chitchat"
    namespace = (user_id, application_context)

    store.put(
        namespace,
        "a-memory",
        {
            "rules": [
                "User likes short, direct language",
                "User only speaks English & python",
            ],
            "my-key": "my-value",
        },
    )

    # Get the "memory" by ID
    item = store.get(namespace, "a-memory")

    # Search within this namespace, filtering on content equivalence,
    # sorted by vector similarity
    items = store.search(
        namespace, filter={"my-key": "my-value"}, query="language preferences"
    )
For more information about the memory store, see the Persistence guide.
from dataclasses import dataclass

from langchain.agents import create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.runnables import Runnable
from langgraph.store.memory import InMemoryStore


@dataclass
class Context:
    user_id: str


# InMemoryStore saves data to an in-memory dictionary.
# Use a DB-backed store in production.
store = InMemoryStore()

# Write sample data to the store using the put method
store.put(
    ("users",),  # Namespace to group related data together (users namespace for user data)
    "user_123",  # Key within the namespace (user ID as key)
    {
        "name": "John Smith",
        "language": "English",
    },  # Data to store for the given user
)


@tool
def get_user_info(runtime: ToolRuntime[Context]) -> str:
    """Look up user info."""
    # Access the store - same as that provided to `create_agent`
    assert runtime.store is not None
    user_id = runtime.context.user_id
    # Retrieve data from store - returns StoreValue object with value and metadata
    user_info = runtime.store.get(("users",), user_id)
    return str(user_info.value) if user_info else "Unknown user"


agent: Runnable = create_agent(
    model="google_genai:gemini-3.1-pro-preview",
    tools=[get_user_info],
    # Pass store to agent - enables agent to access store when running tools
    store=store,
    context_schema=Context,
)

# Run the agent
agent.invoke(
    {"messages": [{"role": "user", "content": "look up user information"}]},
    # user_id passed in context to identify whose information to read
    context=Context(user_id="user_123"),
)
from dataclasses import dataclass

from langchain.agents import create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.runnables import Runnable
from langgraph.store.postgres import PostgresStore  # type: ignore[import-not-found]


@dataclass
class Context:
    user_id: str


DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable"

with PostgresStore.from_conn_string(DB_URI) as store:
    # Create the required tables/indexes on first use.
    store.setup()

    # Seed sample data under the ("users",) namespace.
    store.put(("users",), "user_123", {"name": "John Smith", "language": "English"})

    @tool
    def get_user_info(runtime: ToolRuntime[Context]) -> str:
        """Look up user info."""
        # The runtime exposes the same store that was passed to `create_agent`.
        assert runtime.store is not None
        user_info = runtime.store.get(("users",), runtime.context.user_id)
        return str(user_info.value) if user_info else "Unknown user"

    agent: Runnable = create_agent(
        "claude-sonnet-4-6",
        tools=[get_user_info],
        store=store,
        context_schema=Context,
    )

    result = agent.invoke(
        {"messages": [{"role": "user", "content": "look up user information"}]},
        context=Context(user_id="user_123"),
    )
from dataclasses import dataclass

from langchain.agents import create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.runnables import Runnable
from langgraph.store.memory import InMemoryStore
from typing_extensions import TypedDict

# InMemoryStore saves data to an in-memory dictionary.
# Use a DB-backed store in production.
store = InMemoryStore()


@dataclass
class Context:
    user_id: str


# TypedDict defines the structure of user information for the LLM
class UserInfo(TypedDict):
    name: str


# Tool that allows agent to update user information (useful for chat applications)
@tool
def save_user_info(user_info: UserInfo, runtime: ToolRuntime[Context]) -> str:
    """Save user info."""
    # Access the store - same as that provided to `create_agent`
    assert runtime.store is not None
    store = runtime.store
    user_id = runtime.context.user_id
    # Store data in the store (namespace, key, data)
    store.put(("users",), user_id, dict(user_info))
    return "Successfully saved user info."


agent: Runnable = create_agent(
    model="google_genai:gemini-3.1-pro-preview",
    tools=[save_user_info],
    store=store,
    context_schema=Context,
)

# Run the agent
agent.invoke(
    {"messages": [{"role": "user", "content": "My name is John Smith"}]},
    # user_id passed in context to identify whose information is being updated
    context=Context(user_id="user_123"),
)

# You can access the store directly to get the value
item = store.get(("users",), "user_123")
from dataclasses import dataclass

from langchain.agents import create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.runnables import Runnable
from langgraph.store.postgres import PostgresStore  # type: ignore[import-not-found]
from typing_extensions import TypedDict


@dataclass
class Context:
    user_id: str


# Structure of the user information the LLM is asked to extract.
class UserInfo(TypedDict):
    name: str


@tool
def save_user_info(user_info: UserInfo, runtime: ToolRuntime[Context]) -> str:
    """Save user info."""
    # The runtime exposes the same store that was passed to `create_agent`.
    assert runtime.store is not None
    runtime.store.put(("users",), runtime.context.user_id, dict(user_info))
    return "Successfully saved user info."


DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable"

with PostgresStore.from_conn_string(DB_URI) as store:
    # Create the required tables/indexes on first use.
    store.setup()

    agent: Runnable = create_agent(
        "claude-sonnet-4-6",
        tools=[save_user_info],
        store=store,
        context_schema=Context,
    )

    agent.invoke(
        {"messages": [{"role": "user", "content": "My name is John Smith"}]},
        context=Context(user_id="user_123"),
    )
Connect these docs to Claude, VSCode, and more via MCP for real-time answers.