import { FlowExecutor } from 'openflow-nodejs-sdk';
// Placeholder/schema configuration for a FlowExecutor instance.
// Each string value documents the expected type, allowed range, and default
// for the setting it stands in for — substitute concrete values before running.

// Retry policy applied to LLM provider requests.
const llmRetryStrategy = {
  retry_strategy: {
    max_retries: "max_retry_attempts (integer, 0-10, default: 3)",
    backoff: "backoff_type (enum: 'exponential'|'linear'|'fixed')",
    base_delay: "base_delay_ms (integer, 100-10000, default: 1000)",
  },
};

// Throttling settings for a single LLM provider.
const llmRateLimit = {
  concurrent_limit: "max_concurrent_requests (integer, 1-100, default: 3)",
  rps: "requests_per_second (integer, 1-1000, default: 8)",
  ...llmRetryStrategy,
};

// Throttling settings for a single vector-database provider.
const vectorRateLimit = {
  concurrent_limit: "max_concurrent_vector_operations (integer, 1-100, default: 5)",
  rps: "vector_requests_per_second (integer, 1-1000, default: 10)",
  retry_strategy: "retry_config (RetryStrategy object or null to disable)",
};

// Throttling + retry settings for a single embeddings provider.
const embeddingRateLimit = {
  concurrent_limit: "max_concurrent_embedding_requests (integer, 1-100, default: 10)",
  rps: "embedding_requests_per_second (integer, 1-1000, default: 20)",
  retry_strategy: {
    max_retries: "embedding_max_retries (integer, 0-10, default: 2)",
    backoff: "embedding_backoff_strategy (enum: 'linear'|'exponential'|'fixed')",
    base_delay: "embedding_base_delay_ms (integer, 100-5000, default: 500)",
  },
};

const executor = new FlowExecutor({
  // How many flows may execute simultaneously across the whole process.
  concurrency: {
    global_limit: "max_concurrent_flows (integer, 1-50, default: 3)",
  },
  // Per-provider credentials and rate limits, keyed by provider name.
  providers: {
    llm: {
      "provider_name (e.g., 'grok', 'openai', 'anthropic')": {
        apiKey: "api_key_credential (environment variable or direct key)",
        rate_limit: llmRateLimit,
      },
    },
    vectorDB: {
      "vector_provider_name (e.g., 'pinecone', 'chroma', 'weaviate')": {
        provider: "provider_identifier (matches the key name)",
        index_name: "vector_index_name (provider-specific database/collection name)",
        apiKey: "vector_db_api_key (authentication credential)",
        rate_limit: vectorRateLimit,
      },
    },
    embeddings: {
      "embedding_provider_name (e.g., 'openai', 'cohere', 'huggingface')": {
        apiKey: "embedding_api_key (authentication credential)",
        rate_limit: embeddingRateLimit,
      },
    },
  },
  // Global request timeout and executor-wide housekeeping options.
  timeout: "global_timeout_ms (integer, 5000-300000, default: 30000)",
  logLevel: "logging_level (enum: 'debug'|'info'|'warn'|'error', default: 'warn')",
  tempDir: "temporary_directory_path (string, relative or absolute path, default: './of_tmp')",
});