
openai_call

openai_call(messages, llm_model_name='gpt-3.5-turbo')

Call the OpenAI API to get the response.

Parameters:

Name            Type        Description
messages        List[dict]  The messages to send to the OpenAI API
llm_model_name  str         The name of the LLM model (defaults to 'gpt-3.5-turbo')

Returns:

Name               Type   Description
response_json_str  str    The response from the OpenAI API
cost               float  The cost of the response
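
A minimal usage sketch (the messages below are hypothetical; it assumes the module-level OpenAI client, logger, and track_usage used by openai_call are already configured in Docs2KG and that an OpenAI API key is available in the environment):

from Docs2KG.modules.llm.openai_call import openai_call

messages = [
    {"role": "system", "content": "You are an extraction assistant. Reply with a JSON object."},
    {"role": "user", "content": "Extract the people and organisations from: 'Alice joined Acme in 2020.'"},
]

response_json_str, cost = openai_call(messages, llm_model_name="gpt-3.5-turbo")
print(response_json_str)  # JSON string assembled from the model's response
print(cost)               # accumulated cost reported by track_usage

Because the call uses response_format={"type": "json_object"}, the prompt should mention JSON; the OpenAI API rejects JSON-mode requests whose messages never reference it.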

Source code in Docs2KG/modules/llm/openai_call.py
def openai_call(
    messages: List[dict], llm_model_name: str = "gpt-3.5-turbo"
) -> Tuple[str, float]:
    """
    Call the OpenAI API to get the response
    Args:
        messages (List[dict]): The messages to send to the OpenAI API
        llm_model_name (str): The name of the LLM model


    Returns:
        response_json_str (str): The response from the OpenAI API
        cost (float): The cost of the response
    """
    result_json_str = ""
    cost = 0
    while True:
        response = client.chat.completions.create(
            model=llm_model_name,
            response_format={"type": "json_object"},
            messages=messages,
            temperature=0.0,
        )
        logger.debug(response)
        content = response.choices[0].message.content
        logger.debug(content)
        result_json_str += content
        cost += track_usage(response)
        # finish_reason "length" means the output was truncated by the token limit,
        # so keep the partial content and ask the model to continue
        logger.debug(response.choices[0].finish_reason)
        if response.choices[0].finish_reason != "length":
            break
        else:
            messages.append(
                {
                    "role": "assistant",
                    "content": content,
                }
            )
            messages.append(
                {
                    "role": "user",
                    "content": "Continue the response",
                }
            )

    return result_json_str, cost
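
Because the loop keeps asking the model to "Continue the response" whenever finish_reason is "length", result_json_str is the concatenation of every partial chunk and is not guaranteed to parse as valid JSON. A hedged validation sketch for callers (not part of Docs2KG):

import json

response_json_str, cost = openai_call(messages)
try:
    data = json.loads(response_json_str)
except json.JSONDecodeError:
    data = None  # handle a truncated or malformed continuation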