> ## Documentation Index
> Fetch the complete documentation index at: https://www.bolna.ai/docs/llms.txt
> Use this file to discover all available pages before exploring further.

# Retrieve Voice AI Agent Details API

> Retrieve detailed Voice AI agent information, including configuration, status, and tasks, using Bolna APIs.



## OpenAPI

````yaml GET /v2/agent/{agent_id}
openapi: 3.1.0
info:
  title: Bolna API
  description: >-
    Use and leverage Bolna Voice AI using APIs through HTTP requests from any
    language in your applications and workflows.
  license:
    name: MIT
  version: 1.0.0
servers:
  - url: https://api.bolna.ai
    description: Production server
security:
  - bearerAuth: []
paths:
  /v2/agent/{agent_id}:
    get:
      description: Retrieve an agent
      parameters:
        - in: path
          name: agent_id
          required: true
          schema:
            type: string
            format: uuid
      responses:
        '200':
          description: Agent response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/AgentV2'
        '400':
          description: unexpected error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
components:
  schemas:
    AgentV2:
      properties:
        id:
          type: string
          format: uuid
          description: Unique identifier for the agent
        agent_name:
          description: Human-readable agent name
          type: string
          example: Alfred
        agent_type:
          description: Type of agent
          type: string
          example: other
        agent_status:
          description: Current status of the agent
          type: string
          enum:
            - seeding
            - processed
          example: processed
        created_at:
          type: string
          format: date-time
          example: '2024-01-23T01:14:37Z'
          description: Timestamp of agent creation
        updated_at:
          type: string
          format: date-time
          example: '2024-01-29T18:31:22Z'
          description: Timestamp of last update for the agent
        tasks:
          items:
            $ref: '#/components/schemas/TasksConfigV2'
          type: array
          description: An array of tasks that the agent can perform
        ingest_source_config:
          $ref: '#/components/schemas/IngestSourceConfig'
        agent_prompts:
          $ref: '#/components/schemas/AgentPrompt'
          description: >-
            Prompts to be provided to the agent. It can have multiple tasks of
            the form `task_<task_id>`
      type: object
    Error:
      required:
        - error
        - message
      type: object
      properties:
        error:
          type: integer
          format: int32
        message:
          type: string
    TasksConfigV2:
      properties:
        task_type:
          type: string
          description: Type of task
          enum:
            - conversation
            - extraction
            - summarization
        tools_config:
          $ref: '#/components/schemas/ToolsConfigV2'
          type: object
          description: Configuration of multiple tools that form a task
        toolchain:
          $ref: '#/components/schemas/Toolchain'
          type: object
          description: Agent will execute these tools in the specified order
        task_config:
          $ref: '#/components/schemas/ConversationConfig'
          type: object
          description: >-
            Should be used only in conversation task for now and it consists of
            all the required configuration for conversational nuances
      type: object
    IngestSourceConfig:
      type: object
      description: >-
        Configuration for ingestion source used for inbound agents. Required
        fields vary by `source_type`.
      properties:
        source_type:
          type: string
          enum:
            - api
            - csv
            - google_sheet
          description: Type of CRM ingestion source
          example: api
        source_url:
          type: string
          format: uri
          nullable: true
          description: API or Google Sheet URL
          example: https://example.com/api/data
        source_auth_token:
          type: string
          nullable: true
          description: Bearer token for API authentication
          example: abc123
        source_name:
          type: string
          nullable: true
          description: File or sheet name
          example: leads_sheet_june.csv
      allOf:
        - if:
            properties:
              source_type:
                const: api
          then:
            required:
              - source_url
              - source_auth_token
        - if:
            properties:
              source_type:
                const: csv
          then:
            required:
              - source_name
        - if:
            properties:
              source_type:
                const: google_sheet
          then:
            required:
              - source_name
              - source_url
    AgentPrompt:
      properties:
        task_1:
          type: object
          properties:
            system_prompt:
              description: The system prompt fed into the agent
              type: string
              example: >-
                What is the Ultimate Question of Life, the Universe, and
                Everything?
          required:
            - system_prompt
      type: object
    ToolsConfigV2:
      properties:
        llm_agent:
          $ref: '#/components/schemas/LlmAgentV2'
          type: object
          description: Configuration of LLM model for the agent task
        synthesizer:
          $ref: '#/components/schemas/Synthesizer'
          type: object
          description: Configuration of Synthesizer model for the agent task
        transcriber:
          $ref: '#/components/schemas/Transcriber'
          type: object
          description: Configuration of Transcriber model for the agent task
        input:
          $ref: '#/components/schemas/InputOutput'
          type: object
          description: Configuration of Input handler
        output:
          $ref: '#/components/schemas/InputOutput'
          type: object
          description: Configuration of Output handler
        api_tools:
          $ref: '#/components/schemas/ApiTools'
          type: object
          description: Api tools you'd like the agents to have access to
          default: null
          nullable: true
      type: object
      required:
        - llm_agent
        - synthesizer
        - transcriber
        - input
        - output
    Toolchain:
      properties:
        execution:
          type: string
          enum:
            - parallel
            - sequential
        pipelines:
          type: array
          items:
            type: string
            enum:
              - transcriber
              - llm
              - synthesizer
          example:
            - - transcriber
              - llm
              - synthesizer
      type: object
      required:
        - execution
        - pipelines
    ConversationConfig:
      properties:
        hangup_after_silence:
          anyOf:
            - type: integer
          title: Hangup After Silence
          description: >-
            Time to wait in seconds before hanging up in case user doesn't speak
            a thing
          default: 10
          example: 10
        incremental_delay:
          anyOf:
            - type: integer
          title: Incremental Delay
          description: >-
            Since we work with interim results, this will dictate the linear
            delay to add before speaking everytime we get a partial transcript
            from ASR
          default: 400
          example: 400
        number_of_words_for_interruption:
          anyOf:
            - type: integer
          title: Number Of Words For Interruption
          description: >-
            To avoid accidental interruption, how many words should we wait for
            before interrupting
          default: 2
          example: 2
        hangup_after_LLMCall:
          type: boolean
          title: Hangup After Llmcall
          description: >-
            Whether to use LLM prompt to hang up or not. Pretty soon this will
            be replaced by predefined function
          default: false
          example: false
        call_cancellation_prompt:
          anyOf:
            - type: string
          title: Call Cancellation Prompt
          example: null
        backchanneling:
          anyOf:
            - type: boolean
          title: Backchanneling
          description: >-
            This will enable agent to acknowledge when user is speaking long
            sentences
          default: false
          example: false
        backchanneling_message_gap:
          anyOf:
            - type: integer
          title: Backchanneling Message Gap
          description: >-
            Gap between every successive acknowledgement. We will also add a
            random jitter to this value to make it more random
          default: 5
          example: 5
        backchanneling_start_delay:
          anyOf:
            - type: integer
          title: Backchanneling Start Delay
          description: Basic delay after which we should start with backchanneling
          default: 5
          example: 5
        ambient_noise_track:
          type: string
          title: Ambient Noise Track
          description: >-
            ID of the ambient noise track to play during calls. Use a preset
            track ID (e.g. `coffee-shop`, `office-ambience`, `call-center`) or
            the ID of a custom track uploaded via `POST /ambient-sounds/custom`.
            Set to `null` to disable ambient noise. Only supported with Plivo
            and Vobiz telephony providers.
          default: null
          example: coffee-shop
        call_terminate:
          anyOf:
            - type: integer
          title: Terminate a call after specified number of seconds
          description: The call automatically disconnects reaching this limit
          default: 90
          example: 90
        voicemail:
          type: boolean
          default: false
          description: >-
            Enable voicemail detection. Agent will automatically disconnect the
            call if voicemail is detected
        inbound_limit:
          type: integer
          default: -1
          description: >-
            Set the number of times each phone number is allowed to call. Put
            `-1` to allow unlimited calls.
        whitelist_phone_numbers:
          type: array
          example: null
          items:
            type: string
          description: >-
            Add phone numbers here that should never be restricted by the call
            limits (ideal for internal or testing numbers). Phone number should
            have a country code (in [E.164](https://en.wikipedia.org/wiki/E.164)
            format)
        disallow_unknown_numbers:
          type: boolean
          default: false
          description: >-
            Only allow incoming calls from the numbers you've sourced using
            IngestSourceConfig.
      type: object
      title: ConversationConfig
    LlmAgentV2:
      properties:
        agent_type:
          type: string
          enum:
            - simple_llm_agent
            - knowledgebase_agent
          default: simple_llm_agent
        agent_flow_type:
          type: string
          enum:
            - streaming
          default: streaming
        routes:
          $ref: '#/components/schemas/Routes'
          type: object
          description: Semantic routing layer
        llm_config:
          oneOf:
            - $ref: '#/components/schemas/SimpleLlmAgent'
            - $ref: '#/components/schemas/KnowledgebaseAgent'
          description: LLM configuration
      type: object
    Synthesizer:
      properties:
        provider:
          type: string
          enum:
            - polly
            - elevenlabs
            - deepgram
            - styletts
          example: elevenlabs
        provider_config:
          oneOf:
            - $ref: '#/components/schemas/ElevenLabsConfig'
            - $ref: '#/components/schemas/PollyConfig'
            - $ref: '#/components/schemas/DeepgramConfig'
        stream:
          type: boolean
          default: true
        buffer_size:
          type: integer
          default: 250
          example: 250
        audio_format:
          type: string
          default: wav
          enum:
            - wav
      type: object
      required:
        - provider
        - provider_config
    Transcriber:
      oneOf:
        - $ref: '#/components/schemas/DeepgramTranscriberConfig'
        - $ref: '#/components/schemas/BodhiTranscriberConfig'
      type: object
    InputOutput:
      properties:
        provider:
          type: string
          default: plivo
          enum:
            - twilio
            - plivo
            - exotel
        format:
          type: string
          default: wav
          enum:
            - wav
      type: object
      required:
        - provider
        - format
    ApiTools:
      type: object
      properties:
        tools:
          type: array
          items:
            oneOf:
              - $ref: '#/components/schemas/TransferCallTools'
          description: >-
            Description of all the tools you'd like to add to the agent. It
            needs to be a JSON string as this will be passed to LLM.
        tools_params:
          $ref: '#/components/schemas/TransferCallToolParams'
          type: object
          description: >-
            Parameters for each tool, where keys must match the `name` field in
            the `tools` array.
      default: null
    Routes:
      properties:
        embedding_model:
          type: string
          title: Embedding Model
          default: snowflake/snowflake-arctic-embed-m
          example: snowflake/snowflake-arctic-embed-m
          description: >-
            Since we use fastembed all models supported by fastembed are
            supported by us
        routes:
          items:
            $ref: '#/components/schemas/Route'
          type: array
          title: route
          description: >-
            These are predefined routes that can be used to answer FAQs, or set
            basic guardrails, or do a static function call.
      type: object
      title: Routes
    SimpleLlmAgent:
      title: SimpleLlmAgent
      properties:
        agent_flow_type:
          type: string
          enum:
            - streaming
          default: streaming
        provider:
          type: string
          default: openai
          example: openai
        family:
          type: string
          default: openai
          example: openai
        model:
          type: string
          default: gpt-4.1-mini
          example: gpt-4.1-mini
        summarization_details:
          type: string
          default: null
          example: null
          nullable: true
        extraction_details:
          type: string
          default: null
          example: null
          nullable: true
        max_tokens:
          type: integer
          default: 100
          example: 150
        presence_penalty:
          type: number
          format: float
          default: 0
          example: 0
        frequency_penalty:
          type: number
          format: float
          default: 0
          example: 0
        base_url:
          type: string
          default: https://api.openai.com/v1
          example: https://api.openai.com/v1
        top_p:
          type: number
          format: float
          default: 0.9
          example: 0.9
        min_p:
          type: number
          format: float
          default: 0.1
          example: 0.1
        top_k:
          type: number
          format: integer
          default: 0
          example: 0
        temperature:
          type: number
          format: float
          default: 0.1
          example: 0.1
        request_json:
          type: boolean
          default: false
      type: object
    KnowledgebaseAgent:
      title: KnowledgebaseAgent
      allOf:
        - type: object
          properties:
            vector_store:
              $ref: '#/components/schemas/VectorStore'
              type: object
              description: >-
                Vector Store for knowledgebase. Use [Knowledgebase
                APIs](/knowledgebase/create) to upload PDFs or URLs. You can
                select multiple knowledgebases using `vector_ids`.
        - $ref: '#/components/schemas/SimpleLlmAgent'
    ElevenLabsConfig:
      title: ElevenLabs
      properties:
        voice:
          type: string
          description: Name of voice
          enum:
            - Nila
        voice_id:
          type: string
          description: Unique voice id
          enum:
            - V9LCAAi4tTlqe9JadbCo
        model:
          type: string
          description: Model to be used
          enum:
            - eleven_turbo_v2_5
            - eleven_flash_v2_5
          example: eleven_turbo_v2_5
      required:
        - voice
        - voice_id
        - model
    PollyConfig:
      title: Polly
      properties:
        voice:
          type: string
          description: Name of voice
          enum:
            - Matthew
        engine:
          type: string
          description: Engine of voice
          enum:
            - generative
        sampling_rate:
          type: string
          description: Sampling rate of voice
          default: '8000'
          enum:
            - '8000'
            - '16000'
        language:
          type: string
          description: Language of voice
          enum:
            - en-US
      required:
        - voice
        - engine
        - language
    DeepgramConfig:
      title: Deepgram
      properties:
        voice:
          type: string
          description: Name of voice
          enum:
            - Asteria
        model:
          type: string
          description: Model of voice
          example: aura-asteria-en
        sampling_rate:
          type: string
          description: Sampling rate of voice
          default: '24000'
      required:
        - voice
        - model
    DeepgramTranscriberConfig:
      title: Deepgram
      properties:
        provider:
          type: string
          description: Identification provider for Deepgram
          enum:
            - deepgram
        model:
          enum:
            - nova-3
            - nova-2
            - nova-2-meeting
            - nova-2-phonecall
            - nova-2-finance
            - nova-2-conversationalai
            - nova-2-medical
            - nova-2-drivethru
            - nova-2-automotive
          example: nova-3
        language:
          enum:
            - en
            - hi
            - es
            - fr
          example: hi
        stream:
          type: boolean
          default: true
        sampling_rate:
          type: integer
          default: 16000
          example: 16000
        encoding:
          type: string
          default: linear16
          enum:
            - linear16
        endpointing:
          type: integer
          default: 250
          example: 250
      required:
        - provider
        - model
        - language
    BodhiTranscriberConfig:
      title: Bodhi
      properties:
        provider:
          type: string
          description: Identification provider for Bodhi
          enum:
            - bodhi
        model:
          enum:
            - hi-general-v2-8khz
            - kn-general-v2-8khz
            - mr-general-v2-8khz
            - ta-general-v2-8khz
            - bn-general-v2-8khz
          example: hi-general-v2-8khz
        stream:
          type: boolean
          default: true
        sampling_rate:
          type: integer
          default: 16000
          example: 16000
        encoding:
          type: string
          default: linear16
          enum:
            - linear16
        endpointing:
          type: integer
          default: 100
          example: 100
        language:
          enum:
            - hi
            - kn
            - mr
            - ta
            - bn
          example: hi
      required:
        - provider
        - model
    TransferCallTools:
      title: transfer_call
      properties:
        name:
          type: string
          description: Any unique name for this function tool
          example: transfer_call_support
        key:
          type: string
          enum:
            - transfer_call
          default: transfer_call
        description:
          type: string
          description: Use this tool to transfer the call
          example: Use this tool to transfer the call
        parameters:
          type: object
          properties:
            type:
              type: string
              example: object
            properties:
              type: object
              properties:
                call_sid:
                  type: object
                  properties:
                    type:
                      type: string
                      example: string
                    description:
                      type: string
                      description: unique call id
                      example: unique call id
            required:
              type: array
              items:
                type: string
                enum:
                  - call_sid
              example:
                - call_sid
    TransferCallToolParams:
      type: object
      properties:
        transfer_call_support:
          type: object
          properties:
            method:
              type: string
              enum:
                - POST
                - GET
              default: GET
              description: Type of request
              example: POST
            url:
              type: string
              format: uri
              description: Link of the URL to control the transferring of call
              example: null
              nullable: true
            api_token:
              type: string
              example: null
              nullable: true
              description: API Token in case the URL needs authentication
            param:
              description: Stringified JSON of the tool schema
              type: string
              example: >-
                {"call_transfer_number": "+19876543210","call_sid":
                "%(call_sid)s"}
    Route:
      properties:
        route_name:
          type: string
          title: Route Name
          example: politics
        utterances:
          items:
            type: string
          type: array
          title: Utterances
          example:
            - Who do you think will win the elections?
            - Whom would you vote for?
          description: >-
            This is an array of utterances which when spoken you want to send a
            static response
        response:
          anyOf:
            - items:
                type: string
              type: array
            - type: string
          example: Hey, thanks but I do not have opinions on politics
          title: Response
          description: >-
            It can be a stand alone string or array of responses. If it's an
            array the length should be same as number of utterances and a
            particular index will be matched before returning
        score_threshold:
          description: Similarity score threshold
          type: number
          title: Score Threshold
          default: 0.85
          example: 0.9
      type: object
      required:
        - route_name
        - utterances
        - response
      title: Route
    VectorStore:
      properties:
        provider:
          type: string
          enum:
            - lancedb
          default: lancedb
          description: Provider vector store database
        provider_config:
          $ref: '#/components/schemas/LanceDbConfig'
          type: object
          description: Configuration of the vector store database
      type: object
      title: VectorStore
    LanceDbConfig:
      properties:
        vector_id:
          type: string
          format: uuid
          description: >-
            Vector id of a single knowledgebase (legacy, use `vector_ids` for
            multiple)
        vector_ids:
          type: array
          items:
            type: string
            format: uuid
          description: Array of vector ids to use multiple knowledgebases simultaneously
          example:
            - 3c90c3cc-0d44-4b50-8822-8dd25736052a
            - 4d91c4dd-1e55-5c61-9933-9ee36847163b
      type: object
      title: LanceDbConfig
  securitySchemes:
    bearerAuth:
      type: http
      scheme: bearer

````