import { GradientLLM } from "@langchain/community/llms/gradient_ai";

// Note that inferenceParameters are optional
const model = new GradientLLM({
  modelSlug: "llama2-7b-chat",
  inferenceParameters: {
    maxGeneratedTokenCount: 20,
    temperature: 0,
  },
});

const res = await model.invoke(
  "What would be a good company name for a company that makes colorful socks?"
);

console.log({ res });
To use your own custom adapter, simply set the adapterId during setup.
Copy
Ask AI
import { GradientLLM } from "@langchain/community/llms/gradient_ai";

// Note that inferenceParameters are optional
const model = new GradientLLM({
  adapterId: process.env.GRADIENT_ADAPTER_ID,
  inferenceParameters: {
    maxGeneratedTokenCount: 20,
    temperature: 0,
  },
});

const res = await model.invoke(
  "What would be a good company name for a company that makes colorful socks?"
);

console.log({ res });