# LocalAI Framework

Build AI apps faster: no LLM API keys, no cloud costs, no boilerplate. Just code.
LocalAI Framework is a zero-config, AI-native web framework that makes it easy to build applications with embedded LLMs. It provides a unified API for text generation, embeddings, and agentic workflows, all running locally on your machine.
## Features

- 🚀 Zero Configuration: Get started in seconds with our CLI
- 🤖 Embedded LLM: Ships with TinyLlama for instant local inference
- 🔌 Unified API: Simple React hooks for AI functionality
- 💻 Local-First: No API keys or cloud costs required
- 🔄 Hybrid Mode: Optional cloud provider fallback (see the provider sketch below)
- 🛠 Developer Tools: Built-in AI playground and performance monitoring
## Quick Start

```bash
# Create a new project
npx create-localai@latest my-ai-app

# Navigate to the project
cd my-ai-app

# Start the development server
npm run dev
```
## Usage

### Generating Text

Call the `generate` function returned by the `useLLM` hook:

```tsx
import { useLLM } from '@localai/framework';

function MyAIComponent() {
  const { generate, isLoading } = useLLM();

  const handleClick = async () => {
    const response = await generate({
      prompt: "Write a short sci-fi story."
    });
    console.log(response.text);
  };

  return (
    <button onClick={handleClick} disabled={isLoading}>
      Generate Story
    </button>
  );
}
```
### Provider Setup

Wrap your application in `LLMProvider` to configure the model globally:

```tsx
// _app.tsx or similar
import { LLMProvider } from '@localai/framework';

function MyApp({ Component, pageProps }) {
  return (
    <LLMProvider config={{ model: 'tinyllama', temperature: 0.7 }}>
      <Component {...pageProps} />
    </LLMProvider>
  );
}
```
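Hybrid mode falls back to a cloud provider through the same config object. The keys below (`fallback`, `provider`, `apiKey`) are illustrative assumptions about the config shape, not confirmed options:

```tsx
// Hypothetical hybrid-mode config: the `fallback` block is an assumed
// shape, shown only to illustrate the cloud-fallback feature.
<LLMProvider
  config={{
    model: 'tinyllama',
    temperature: 0.7,
    fallback: {
      provider: 'openai',                  // assumed: cloud provider id
      apiKey: process.env.OPENAI_API_KEY   // only read when falling back
    }
  }}
>
  <Component {...pageProps} />
</LLMProvider>
```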
## Agents

Define autonomous agents with a role, a set of tools, and a model:

```ts
import { defineAgent } from '@localai/framework';

const CodeAgent = defineAgent({
  role: "Senior Developer",
  tools: ['writeFile', 'runTests'],
  model: "phind-codellama"
});

// Use the agent
const result = await CodeAgent.execute("Refactor this function to use async/await");
```
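Beyond the built-in tool names, a custom tool could plausibly be passed as an object. The `name`/`description`/`run` shape here is an assumption for illustration, not a documented API:

```ts
// Hypothetical custom tool: the { name, description, run } shape is an
// assumed convention, not a documented API.
const searchDocs = {
  name: 'searchDocs',
  description: 'Full-text search over the local docs folder',
  run: async (query: string) => {
    // ...perform the search and return matching snippets
    return `results for ${query}`;
  }
};

const DocsAgent = defineAgent({
  role: "Technical Writer",
  tools: [searchDocs],
  model: "tinyllama"
});
```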
## Retrieval-Augmented Generation (RAG)

Query your own documents with the `useRAG` hook. Note that hooks must be called inside a React component, and `query` must be awaited inside an async handler or effect:

```tsx
import { useRAG } from '@localai/framework';

// Inside a React component:
const { query } = useRAG({
  documents: ['doc1.pdf', 'doc2.pdf'],
  model: 'tinyllama'
});

// Inside an async handler:
const answer = await query("What do the documents say about X?");
```
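The unified API also covers embeddings. A minimal sketch, assuming a `useEmbeddings` hook that mirrors `useLLM`; the hook name, export, and return shape are assumptions:

```tsx
import { useEmbeddings } from '@localai/framework'; // assumed export

// Inside a React component (assumed hook, mirroring useLLM):
const { embed } = useEmbeddings({ model: 'tinyllama' });

// Inside an async handler: embed a sentence and use the vector as you like.
const vector = await embed("Local-first AI frameworks");
console.log(vector.length); // assumed: returns a numeric vector
```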
## Contributing

We welcome contributions! Please see our Contributing Guide for details.

## License

MIT © LocalAI Team