GroqCloud API Reference
Chat
Create chat completion
POST https://api.groq.com/openai/v1/chat/completions
Request Body
frequency_penalty number or null Optional Defaults to 0
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in
the text so far, decreasing the model's likelihood to repeat the same line verbatim.
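For illustration only, a minimal sketch of setting this parameter on a chat completion request. It mirrors the full example below; the value 0.5 is an arbitrary example, not a recommendation.
import Groq from "groq-sdk";

const groq = new Groq({ apiKey: process.env.GROQ_API_KEY });

async function main() {
  const chatCompletion = await groq.chat.completions.create({
    messages: [
      { role: "user", content: "Explain the importance of fast language models" },
    ],
    model: "mixtral-8x7b-32768",
    frequency_penalty: 0.5, // positive value discourages verbatim repetition
  });
  console.log(chatCompletion.choices[0]?.message?.content || "");
}

main();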
Returns
Returns a chat completion object, or a streamed sequence of chat completion chunk objects if the request
is streamed.
// Default
import Groq from "groq-sdk";

const groq = new Groq({ apiKey: process.env.GROQ_API_KEY });

async function main() {
  const chatCompletion = await groq.chat.completions.create({
    messages: [
      {
        role: "user",
        content: "Explain the importance of fast language models",
      },
    ],
    model: "mixtral-8x7b-32768",
  });
  console.log(chatCompletion.choices[0]?.message?.content || "");
}

main();
{
"id": "34a9110d-c39d-423b-9ab9-9c748747b204",
"object": "chat.completion",
"created": 1708045122,
"model": "mixtral-8x7b-32768",
"system_fingerprint": null,
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Low latency Large Language Models (LLMs) are important in the field of artificial intelligence and natural la
},
"finish_reason": "stop",
"logprobs": null
}
],
"usage": {
"prompt_tokens": 24,
"completion_tokens": 377,
"total_tokens": 401,
"prompt_time": 0.009,
"completion_time": 0.774,
"total_time": 0.783
}
}
Audio
Create transcription
POST https://api.groq.com/openai/v1/audio/transcriptions
Request Body
file string Required
The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga,
m4a, ogg, wav, or webm.
language string Optional
The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy
and latency.
Returns
Returns an audio transcription object
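A minimal sketch of a transcription request, written in the style of the translation example further below and assuming the SDK exposes groq.audio.transcriptions.create for this endpoint; the file name and model are reused from that example as placeholders.
import fs from "fs";
import Groq from "groq-sdk";

const groq = new Groq();

async function main() {
  const transcription = await groq.audio.transcriptions.create({
    file: fs.createReadStream("sample_audio.m4a"), // placeholder file name
    model: "whisper-large-v3", // assumed from the translation example below
    language: "en", // Optional ISO-639-1 hint, per the language parameter above
  });
  console.log(transcription.text);
}

main();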
{
"text": "Your transcribed text appears here...",
"x_groq": {
"id": "req_unique_id"
}
}
Create translation
POST https://api.groq.com/openai/v1/audio/translations
Translates audio into English.
Request Body
file string Required
The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a,
ogg, wav, or webm.
Returns
Returns an audio translation object
// Default
import fs from "fs";
import Groq from "groq-sdk";

const groq = new Groq();

async function main() {
  const translation = await groq.audio.translations.create({
    file: fs.createReadStream("sample_audio.m4a"),
    model: "whisper-large-v3",
    prompt: "Specify context or spelling", // Optional
    response_format: "json", // Optional
    temperature: 0.0, // Optional
  });
  console.log(translation.text);
}

main();
{
"text": "Your translated text appears here...",
"x_groq": {
"id": "req_unique_id"
}
}
Models
List models
GET https://api.groq.com/openai/v1/models
List models
Returns
A list of models
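As a sketch, assuming the groq-sdk client exposes groq.models.list() for this endpoint (mirroring the OpenAI-style client used in the examples above); the fields iterated over come from the example response below.
import Groq from "groq-sdk";

const groq = new Groq();

async function main() {
  // Assumed SDK method wrapping GET /openai/v1/models
  const models = await groq.models.list();
  for (const model of models.data) {
    console.log(model.id, model.context_window);
  }
}

main();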
{
"object": "list",
"data": [
{
"id": "gemma-7b-it",
"object": "model",
"created": 1693721698,
"owned_by": "Google",
"active": true,
"context_window": 8192
},
{
"id": "llama2-70b-4096",
"object": "model",
"created": 1693721698,
"owned_by": "Meta",
"active": true,
"context_window": 4096
},
{
"id": "mixtral-8x7b-32768",
"object": "model",
"created": 1693721698,
"owned_by": "Mistral AI",
"active": true,
"context_window": 32768
}
]
}
Retrieve model
GET https://api.groq.com/openai/v1/models/{model}
Get model
Returns
A model object
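As a sketch, assuming the groq-sdk client exposes groq.models.retrieve() for GET /openai/v1/models/{model}; the model ID reuses the example response below.
import Groq from "groq-sdk";

const groq = new Groq();

async function main() {
  // Assumed SDK method; model ID taken from the example response below
  const model = await groq.models.retrieve("llama2-70b-4096");
  console.log(model.id, model.context_window);
}

main();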
{
"id": "llama2-70b-4096",
"object": "model",
"created": 1693721698,
"owned_by": "Meta",
"active": true,
"context_window": 4096
}