Assistants and Squads
An onAssistantRequest{} call is used to define the behavior of an application. It is required in all application descriptions. A call to onAssistantRequest{} should define a single assistant{}, squad{}, assistantId{}, or squadId{}. Calls to assistant{} and assistantId{} can also include a call to assistantOverrides{}.
Assistants
An assistant{} call creates a temporary assistant that lasts for the duration of the call.
Assistant Example
fun Vapi4kConfig.assistantExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        openAIModel {
          modelType = OpenAIModelType.GPT_4O
          systemMessage = "You are a helpful agent."
        }
        firstMessage = "Hello, how can I help you today?"
      }
    }
  }
}
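As noted above, an assistant{} call can also include an assistantOverrides{} call. A minimal sketch of that combination is shown below, assuming the nesting mirrors the assistantId{} example later on this page and that the AssistantOverrides context exposes the same basic properties as assistant{} (for example, firstMessage):
fun Vapi4kConfig.assistantWithOverridesExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        openAIModel {
          modelType = OpenAIModelType.GPT_4O
          systemMessage = "You are a helpful agent."
        }
        firstMessage = "Hello, how can I help you today?"
        // Hypothetical override; the properties available here come from the AssistantOverrides context
        assistantOverrides {
          firstMessage = "Welcome back! How can I help you today?"
        }
      }
    }
  }
}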
Squads
A squad{} call creates a temporary squad that lasts for the duration of the call.
Squad Example
fun Vapi4kConfig.squadExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      squad {
        members {
          member {
            assistant {
              name = "Assistant 1"
              // Define assistant1 here
            }
            destinations {
              destination {
                assistantName = "Assistant 2"
              }
            }
          }
          member {
            assistant {
              name = "Assistant 2"
              // Define assistant2 here
            }
            destinations {
              destination {
                assistantName = "Assistant 1"
              }
            }
          }
        }
      }
    }
  }
}
AssistantIds
An assistantId{} call references a static assistant defined in the Vapi dashboard.
An assistantOverrides{} call is used to override the default assistant settings, and its argument has an AssistantOverrides context.
Assistant Id Example
fun Vapi4kConfig.assistantIdExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistantId {
        id = "41ba80bc-807c-4cf5-a8c3-0a88a5a5882g"
        assistantOverrides {
          // Define the assistant overrides here
        }
      }
    }
  }
}
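The overrides block above is left as a placeholder. As a rough sketch, and assuming the AssistantOverrides context mirrors the basic assistant{} properties shown elsewhere on this page (such as firstMessage and a model block), a populated override might look like this:
fun Vapi4kConfig.assistantIdWithOverridesExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistantId {
        id = "41ba80bc-807c-4cf5-a8c3-0a88a5a5882g"
        assistantOverrides {
          // Assumed properties, mirroring assistant{}; check the AssistantOverrides context for the full list
          firstMessage = "Hello, how can I help you today?"
          openAIModel {
            modelType = OpenAIModelType.GPT_4O
            systemMessage = "You are a helpful agent."
          }
        }
      }
    }
  }
}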
SquadIds
A squadId{} call references a static squad defined in the Vapi dashboard.
Squad Id Example
fun Vapi4kConfig.squadIdExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      squadId {
        id = "51ba90bc-807c-4cf5-a8c4-1a88a5a5882h"
      }
    }
  }
}
Models
Model Examples
fun Vapi4kConfig.anthropicExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        anthropicModel {
          modelType = AnthropicModelType.CLAUDE_3_HAIKU
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.anyscaleExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        anyscaleModel {
          model = "Model_Description"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.customLLMExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        customLLMModel {
          model = "Model_Description"
          url = "Model_URL"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.deepInfraExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        deepInfraModel {
          model = "Model_Description"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.groqExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        groqModel {
          modelType = GroqModelType.LLAMA3_70B
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.openAIExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        openAIModel {
          modelType = OpenAIModelType.GPT_4_TURBO
          semanticCachingEnabled = true
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          fallbackModelTypes += OpenAIModelType.GPT_4O
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.openRouterExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        openRouterModel {
          model = "Model_Description"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.perplexityAIExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        perplexityAIModel {
          model = "Model_Description"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.togetherAIExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        togetherAIModel {
          model = "Model_Description"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
fun Vapi4kConfig.vapiExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        vapiModel {
          model = "Model_Description"
          emotionRecognitionEnabled = true
          maxTokens = 250
          temperature = 0.5
          systemMessage = "You're a polite AI assistant named Vapi who is fun to talk with."
        }
      }
    }
  }
}
Voices
Voice Examples
fun Vapi4kConfig.azureExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        azureVoice {
          voiceIdType = AzureVoiceIdType.BRIAN
        }
      }
    }
  }
}
fun Vapi4kConfig.cartesiaExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        cartesiaVoice {
          modelType = CartesiaVoiceModelType.SONIC_ENGLISH
          languageType = CartesiaVoiceLanguageType.ENGLISH
        }
      }
    }
  }
}
fun Vapi4kConfig.deepgramExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        deepgramVoice {
          voiceIdType = DeepGramVoiceIdType.ASTERIA
        }
      }
    }
  }
}
fun Vapi4kConfig.elevenLabsExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        elevenLabsVoice {
          modelType = ElevenLabsVoiceModelType.ELEVEN_TURBO_V2_5
        }
      }
    }
  }
}
fun Vapi4kConfig.lmntExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        lmntVoice {
          voiceIdType = LMNTVoiceIdType.DANIEL
        }
      }
    }
  }
}
fun Vapi4kConfig.neetsExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        neetsVoice {
          voiceIdType = NeetsVoiceIdType.VITS
        }
      }
    }
  }
}
fun Vapi4kConfig.openAIExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        openAIVoice {
          voiceIdType = OpenAIVoiceIdType.ONYX
        }
      }
    }
  }
}
fun Vapi4kConfig.playHTExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        playHTVoice {
          voiceIdType = PlayHTVoiceIdType.DONNA
        }
      }
    }
  }
}
fun Vapi4kConfig.rimeAIExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        rimeAIVoice {
          modelType = RimeAIVoiceModelType.MIST
        }
      }
    }
  }
}
Transcribers
Transcriber Examples
fun Vapi4kConfig.deepgramExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        deepgramTranscriber {
          transcriberModel = DeepgramModelType.NOVA_MEDICAL
          transcriberLanguage = DeepgramLanguageType.INDONESIAN
        }
      }
    }
  }
}
fun Vapi4kConfig.gladiaExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        gladiaTranscriber {
          transcriberModel = GladiaModelType.FAST
          transcriberLanguage = GladiaLanguageType.ICELANDIC
        }
      }
    }
  }
}
fun Vapi4kConfig.talkscriberExample() {
  inboundCallApplication {
    onAssistantRequest { requestContext: RequestContext ->
      assistant {
        talkscriberTranscriber {
          transcriberModel = TalkscriberModelType.WHISPER
          transcriberLanguage = TalkscriberLanguageType.VIETNAMESE
        }
      }
    }
  }
}