> [!WARNING]
> This TypeScript package is currently in beta and will likely change. It is not yet ready for production use.
This package is a TypeScript implementation of the Sentient Agent Framework for building agents that serve Sentient Chat events. It provides a client-server architecture for interacting with the Sentient platform's API, similar to how streaming inference works.
In addition to supporting OpenAI API compatible agents, Sentient Chat supports a custom, open source event system for agent responses. These events can be rendered in Sentient Chat to provide a richer user experience. This is particularly useful for streaming responses from an AI agent, when you might want to show the agent's work while the response is being generated, rather than having the user wait for the final response.
## Installation

```bash
# Using npm
npm install sentient-agent-framework

# Using yarn
yarn add sentient-agent-framework

# Using pnpm (recommended)
pnpm add sentient-agent-framework
```
## Usage

The framework can be used in various environments:

### Next.js API Route
```typescript
// pages/api/agent.ts
import { AbstractAgent, DefaultServer } from 'sentient-agent-framework';

class MyAgent extends AbstractAgent {
  constructor() {
    super('My Agent');
  }

  async assist(session, query, responseHandler) {
    // Emit a text block
    await responseHandler.emitTextBlock('THINKING', 'Processing your query...');

    // Create a text stream for the final response
    const stream = responseHandler.createTextStream('RESPONSE');

    // Stream the response in chunks
    await stream.emitChunk('Hello, ');
    await stream.emitChunk('world!');

    // Complete the stream
    await stream.complete();

    // Complete the response
    await responseHandler.complete();
  }
}

// Create the agent and server
const agent = new MyAgent();
const server = new DefaultServer(agent);

// Export the handler for Next.js API routes
export default async function handler(req, res) {
  return server.handleRequest(req, res);
}
```
### Express

```typescript
// server.js
import express from 'express';
import { AbstractAgent, DefaultServer } from 'sentient-agent-framework';

class MyAgent extends AbstractAgent {
  constructor() {
    super('My Agent');
  }

  async assist(session, query, responseHandler) {
    // Emit a text block
    await responseHandler.emitTextBlock('THINKING', 'Processing your query...');

    // Emit a JSON document with the query details
    await responseHandler.emitJson('QUERY_DETAILS', {
      query: query.prompt,
      timestamp: new Date().toISOString(),
      session_id: session.processor_id
    });

    // Create a text stream for the response
    const stream = responseHandler.createTextStream('RESPONSE');

    // Stream the response word by word
    const words = `Hello! You asked: "${query.prompt}". This is a streaming response from the Express server.`.split(' ');
    for (const word of words) {
      await stream.emitChunk(word + ' ');
      // Small delay between words
      await new Promise(resolve => setTimeout(resolve, 100));
    }

    // Complete the stream
    await stream.complete();

    // Complete the response
    await responseHandler.complete();
  }
}

// Create Express app
const app = express();
app.use(express.json());

// Create the agent and server
const agent = new MyAgent();
const server = new DefaultServer(agent);

// Mount the server at the /assist endpoint
app.use('/assist', (req, res) => server.handleRequest(req, res));

// Start the server
app.listen(3000, () => {
  console.log('Server running on http://localhost:3000');
});
```
### Fastify

```typescript
// server.js
import Fastify from 'fastify';
import formbody from '@fastify/formbody';
import { AbstractAgent, DefaultServer } from 'sentient-agent-framework';

class MyAgent extends AbstractAgent {
  constructor() {
    super('My Agent');
  }

  async assist(session, query, responseHandler) {
    // Emit a text block
    await responseHandler.emitTextBlock('THINKING', 'Processing your query...');

    // Emit a JSON document with the query details
    await responseHandler.emitJson('QUERY_DETAILS', {
      query: query.prompt,
      timestamp: new Date().toISOString(),
      session_id: session.processor_id
    });

    // Create a text stream for the response
    const stream = responseHandler.createTextStream('RESPONSE');

    // Stream the response word by word
    const words = `Hello! You asked: "${query.prompt}". This is a streaming response from the Fastify server.`.split(' ');
    for (const word of words) {
      await stream.emitChunk(word + ' ');
      // Small delay between words
      await new Promise(resolve => setTimeout(resolve, 100));
    }

    // Complete the stream
    await stream.complete();

    // Complete the response
    await responseHandler.complete();
  }
}

// Create Fastify app
const fastify = Fastify({
  logger: true
});

// Register the form-body parser (Fastify parses JSON request bodies natively)
fastify.register(formbody);

// Create the agent and server
const agent = new MyAgent();
const server = new DefaultServer(agent);

// Add a route for the agent, passing Fastify's raw Node request/response to the server
fastify.post('/assist', async (request, reply) => {
  return server.handleRequest(request.raw, reply.raw);
});

// Start the server
fastify.listen({ port: 3000 }, (err) => {
  if (err) {
    fastify.log.error(err);
    process.exit(1);
  }
});
```
### Client

```typescript
// client.ts
import {
  SentientAgentClient,
  EventContentType,
  ResponseEvent,
  TextChunkEvent,
  DocumentEvent,
  TextBlockEvent,
  ErrorEvent
} from 'sentient-agent-framework';

/**
 * Type guards for event types
 */
function isTextChunkEvent(event: ResponseEvent): event is TextChunkEvent {
  return event.content_type === EventContentType.TEXT_STREAM;
}

function isDocumentEvent(event: ResponseEvent): event is DocumentEvent {
  return event.content_type === EventContentType.JSON;
}

function isTextBlockEvent(event: ResponseEvent): event is TextBlockEvent {
  return event.content_type === EventContentType.TEXTBLOCK;
}

function isErrorEvent(event: ResponseEvent): event is ErrorEvent {
  return event.content_type === EventContentType.ERROR;
}

async function main() {
  // Create a client
  const client = new SentientAgentClient();

  // Query an agent
  try {
    console.log("Querying agent...");

    // Track stream IDs to handle multiple streams
    let streamId: string | null = null;

    for await (const event of client.queryAgent('What is the weather today?', 'http://localhost:3000/assist')) {
      // Process events based on their type
      switch (event.content_type) {
        case EventContentType.TEXTBLOCK:
          if (isTextBlockEvent(event)) {
            console.log(`\n${event.event_name}: ${event.content}`);
          }
          break;

        case EventContentType.TEXT_STREAM:
          if (isTextChunkEvent(event)) {
            if (streamId !== event.stream_id) {
              // New stream started
              streamId = event.stream_id;
              console.log(`\n${event.event_name}:`);
            }
            // Print stream content without line break
            process.stdout.write(event.content);
            // Add line break if stream is complete
            if (event.is_complete) {
              console.log();
            }
          }
          break;

        case EventContentType.JSON:
          if (isDocumentEvent(event)) {
            console.log(`\n${event.event_name}:`);
            console.log(JSON.stringify(event.content, null, 2));
          }
          break;

        case EventContentType.ERROR:
          if (isErrorEvent(event)) {
            console.error(`\nError: ${event.content.error_message}`);
            if (event.content.details) {
              console.error(JSON.stringify(event.content.details, null, 2));
            }
          }
          break;

        case EventContentType.DONE:
          console.log('\nDone!');
          break;
      }
    }
  } catch (error) {
    console.error('Error querying agent:', error);
  }
}

main();
```
## Testing with the CLI
The framework includes a CLI client for testing agents. To use it:
1. Start the example server:

   ```bash
   # Install dependencies if you haven't already
   pnpm install

   # Start the example server
   pnpm run example-server
   ```

2. In another terminal, run the CLI client:

   ```bash
   pnpm run cli
   ```

3. Enter the URL of the server (e.g., `http://localhost:3000`) and start chatting with the agent. The CLI will automatically append `/assist` to the URL if it is not already included.
The CLI client will display the events received from the agent, including:
- Text blocks
- JSON documents
- Streaming text
- Error messages
## Event Types
The framework supports several event types, each with a specific purpose:
### 1. TextBlockEvent (`EventContentType.TEXTBLOCK`)
- **Purpose**: For sending complete text messages in a single event
- **Use Case**: Sending thinking steps, intermediate results, or any non-streaming text content
- **Example**:
  ```typescript
  await responseHandler.emitTextBlock('THINKING', 'Processing your query...');
  ```
### 2. DocumentEvent (`EventContentType.JSON`)
- **Purpose**: For sending structured JSON data
- **Use Case**: Sending search results, data visualizations, or any structured data
- **Example**:
  ```typescript
  await responseHandler.emitJson('SEARCH_RESULTS', {
    results: [
      { title: 'Result 1', url: 'https://example.com/1' },
      { title: 'Result 2', url: 'https://example.com/2' }
    ]
  });
  ```

### 3. TextChunkEvent (`EventContentType.TEXT_STREAM`)
- **Purpose**: For streaming text in chunks
- **Use Case**: Streaming the agent's response in real-time
- **Example**:
  ```typescript
  const stream = responseHandler.createTextStream('RESPONSE');
  await stream.emitChunk('Hello, ');
  await stream.emitChunk('world!');
  await stream.complete();
  ```

### 4. ErrorEvent (`EventContentType.ERROR`)
- **Purpose**: For sending error messages
- **Use Case**: Reporting errors that occur during processing
- **Example**:
  ```typescript
  await responseHandler.emitError('Failed to process query', 500, { details: 'API rate limit exceeded' });
  ```

### 5. DoneEvent (`EventContentType.DONE`)
- **Purpose**: For signaling the end of a response
- **Use Case**: Indicating that the agent has completed processing
- **Example**:
  ```typescript
  await responseHandler.complete(); // Automatically emits a DoneEvent
  ```
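As a sketch of how these fit together in a single `assist` implementation, the example below reports progress, attaches structured metadata, streams a reply, and falls back to `emitError` on failure. It only uses the methods shown above; `fetchAnswer` is a hypothetical placeholder for whatever work your agent actually does.

```typescript
import { AbstractAgent } from 'sentient-agent-framework';

// Hypothetical helper standing in for the agent's real work.
async function fetchAnswer(prompt: string): Promise<string> {
  return `You asked: "${prompt}"`;
}

class RobustAgent extends AbstractAgent {
  constructor() {
    super('Robust Agent');
  }

  async assist(session, query, responseHandler) {
    try {
      // Report progress as a single text block.
      await responseHandler.emitTextBlock('THINKING', 'Looking that up...');

      // Attach structured metadata as a JSON document.
      await responseHandler.emitJson('QUERY_DETAILS', {
        prompt: query.prompt,
        received_at: new Date().toISOString()
      });

      // Stream the final answer chunk by chunk.
      const stream = responseHandler.createTextStream('RESPONSE');
      const answer = await fetchAnswer(query.prompt);
      for (const word of answer.split(' ')) {
        await stream.emitChunk(word + ' ');
      }
      await stream.complete();

      // Signal that the response is finished (emits a DoneEvent).
      await responseHandler.complete();
    } catch (err) {
      // Surface failures to the client as an ErrorEvent.
      await responseHandler.emitError(
        err instanceof Error ? err.message : 'Unexpected error',
        500,
        { details: String(err) }
      );
    }
  }
}
```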
## Architecture

The framework follows a client-server architecture:

```mermaid
graph TD
    Client[Client] -->|HTTP Request| Server[Server]
    Server -->|SSE Events| Client
    Server -->|Creates| Session[Session]
    Server -->|Creates| ResponseHandler[ResponseHandler]
    Agent[Agent] -->|Uses| ResponseHandler
    ResponseHandler -->|Creates| TextStream[TextStream]
    ResponseHandler -->|Emits| Events[Events]
    TextStream -->|Emits| Events
    Events -->|Via| Hook[Hook]
    Hook -->|To| Client
```
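For orientation, here is a rough sketch of the contracts implied by the examples above. The method names come from the usage shown in this README; the exact type definitions exported by the package may differ.

```typescript
// Rough sketch only: names inferred from the usage in this README,
// not copied from the package's actual type definitions.
interface TextStream {
  emitChunk(chunk: string): Promise<void>;
  complete(): Promise<void>;
}

interface ResponseHandler {
  emitTextBlock(eventName: string, content: string): Promise<void>;
  emitJson(eventName: string, content: unknown): Promise<void>;
  emitError(message: string, code?: number, details?: unknown): Promise<void>;
  createTextStream(eventName: string): TextStream;
  complete(): Promise<void>; // emits the final DONE event
}

// An agent implements assist(); the DefaultServer creates a session and
// response handler for each incoming HTTP request, calls assist(), and
// forwards every emitted event to the client over SSE.
```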
## Publishing to npm

To publish this package to npm:

1. Ensure all tests pass:

   ```bash
   pnpm run test
   ```

2. Build the package:

   ```bash
   pnpm run build
   ```

3. Update the version in package.json following semantic versioning:

   ```bash
   # For patch releases (bug fixes)
   pnpm version patch

   # For minor releases (new features, backward compatible)
   pnpm version minor

   # For major releases (breaking changes)
   pnpm version major
   ```

4. Publish to npm:

   ```bash
   pnpm publish
   ```

5. Create a GitHub release with release notes.
## Contributing

Contributions are welcome! Please feel free to submit a Pull Request.

1. Fork the repository
2. Create your feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add some amazing feature'`)
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request
## License

This project is licensed under the MIT License - see the LICENSE file for details.