/**
 * Request parameters for a non-streaming chat completion.
 * Extends CreateChatCompletionRequest, with `stream` pinned to `false`
 * so the response arrives as a single completion rather than as deltas.
 */
interface ChatCompletionCreateParamsNonStreaming {
    /** A list of messages comprising the conversation so far. */
    messages: {
        /** The content of the message. */
        content: string;
        /** The role of the sender (e.g., 'user' or 'assistant'). */
        role: "user" | "assistant";
        [k: string]: unknown;
    }[];
    /** The ID of the project to use. */
    project_id: number;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens
     * based on their existing frequency.
     */
    frequency_penalty?: number;
    /**
     * JSON object that maps tokens to an associated bias value
     * from -100 to 100.
     */
    logit_bias?: {
        [k: string]: unknown;
    };
    /** The maximum number of tokens to generate in the chat completion. */
    max_tokens?: number;
    /**
     * ID of the model to use. See the model endpoint compatibility
     * table for details.
     */
    model?: string;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens
     * based on whether they appear in the text so far.
     */
    presence_penalty?: number;
    /**
     * Options for Retrieval Augmented Generation (RAG).
     * Will override launched model settings.
     */
    repositories?: {
        /** The IDs of the repositories to use. */
        ids?: number[];
        limit?: number;
        similarity_threshold?: number;
        [k: string]: unknown;
    };
    /** An object specifying the format that the model must output. */
    response_format?: {
        [k: string]: unknown;
    };
    /**
     * This feature is in Beta. If specified, our system will make a
     * best effort to sample deterministically.
     */
    seed?: number;
    /** The ID of the session to use. It helps to track the chat history. */
    session_id?: string;
    /** Up to 4 sequences where the API will stop generating further tokens. */
    stop?: string;
    /**
     * Only `false` is permitted by this type: this request variant never
     * streams partial message deltas.
     */
    stream?: false;
    /** The system prompt to use. */
    system_prompt?: string;
    /** What sampling temperature to use, between 0 and 2. */
    temperature?: number;
    /**
     * A list of tools the model may call. Currently, only functions are
     * supported as a tool.
     */
    tools?: {
        [k: string]: unknown;
    }[];
    /**
     * An alternative to sampling with temperature, called nucleus sampling.
     */
    top_p?: number;
    /** A unique identifier representing your end-user. */
    user?: string;
}

Hierarchy

  • CreateChatCompletionRequest
    • ChatCompletionCreateParamsNonStreaming

Properties

messages: {
    content: string;
    role: "user" | "assistant";
    [k: string]: unknown;
}[]

A list of messages comprising the conversation so far.

Type declaration

  • [k: string]: unknown
  • content: string

    The content of the message.

  • role: "user" | "assistant"

    The role of the sender (e.g., 'user' or 'assistant').

    • user - user
    • assistant - assistant
project_id: number

The ID of the project to use.

frequency_penalty?: number

Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency.

logit_bias?: {
    [k: string]: unknown;
}

JSON object that maps tokens to an associated bias value from -100 to 100.

max_tokens?: number

The maximum number of tokens to generate in the chat completion.

model?: string

ID of the model to use. See the model endpoint compatibility table for details.

presence_penalty?: number

Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far.

repositories?: {
    ids?: number[];
    limit?: number;
    similarity_threshold?: number;
    [k: string]: unknown;
}

Options for Retrieval Augmented Generation (RAG). Will override launched model settings

Type declaration

  • [k: string]: unknown
  • Optional ids?: number[]

    The IDs of the repositories to use.

  • Optional limit?: number
  • Optional similarity_threshold?: number
response_format?: {
    [k: string]: unknown;
}

An object specifying the format that the model must output.

seed?: number

This feature is in Beta. If specified, our system will make a best effort to sample deterministically.

session_id?: string

The ID of the session to use. It helps to track the chat history.

stop?: string

Up to 4 sequences where the API will stop generating further tokens.

stream?: false

Must be omitted or set to `false` for this non-streaming request variant; the completion is returned as a single response rather than as partial message deltas.

system_prompt?: string

The system prompt to use.

temperature?: number

What sampling temperature to use, between 0 and 2.

tools?: {
    [k: string]: unknown;
}[]

A list of tools the model may call. Currently, only functions are supported as a tool.

top_p?: number

An alternative to sampling with temperature, called nucleus sampling.

user?: string

A unique identifier representing your end-user.