Mirror of https://github.com/RGBCube/chatgpt.v

Add raw responses

This commit is contained in:
RGBCube 2022-12-06 23:15:11 +03:00
parent 0731fa804f
commit c646289f1c
3 changed files with 147 additions and 0 deletions

src/body.v Normal file (+15)

@@ -0,0 +1,15 @@
module chatgpt

// Body is the body for the POST request sent to the ChatGPT API.
struct Body {
    model             string = 'text-davinci-003'
    prompt            string
    max_tokens        int
    stop              string
    n                 u8
    temperature       f32
    top_p             f32
    frequency_penalty f32
    presence_penalty  f32
    best_of           int
}
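
For reference, V's json.encode serializes struct field names as-is, so the snake_case names above line up with the JSON keys the OpenAI completions endpoint expects. A minimal in-module sketch (not part of this commit; it assumes import json) of what a payload looks like:

body := Body{
    prompt: 'Say hello'
    max_tokens: 16
    n: 1
}
// Prints something like:
// {"model":"text-davinci-003","prompt":"Say hello","max_tokens":16,...}
println(json.encode(body))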

src/client.v Normal file (+60)

@@ -0,0 +1,60 @@
module chatgpt

import net.http
import json

const openai_api_url = 'https://api.openai.com/v1/completions'

// make_header returns an http.Header with the authorization and content type set.
[inline]
fn make_header(token string) http.Header {
    return http.new_header_from_map({
        http.CommonHeader.authorization: 'Bearer ' + token
        http.CommonHeader.content_type:  'application/json'
    })
}

// new_client returns a new client for the ChatGPT API with the given token.
pub fn new_client(token string) Client {
    return Client{make_header(token)}
}

// new_client_pointer returns a new client pointer for the ChatGPT API with the given token.
// This is useful for long-lived instances of Client.
pub fn new_client_pointer(token string) &Client {
    return &Client{make_header(token)}
}

// Client is a client for the ChatGPT API.
[noinit]
pub struct Client {
    header http.Header
}

// generate prompts ChatGPT with the given prompt and configuration and
// returns the raw HTTP response for a single completion.
pub fn (c Client) generate(prompt string, config GenerationConfig) !http.Response {
    return c.generate_multiple(prompt, 1, config)!
}

// generate_multiple prompts ChatGPT with the given prompt and configuration
// and returns the raw HTTP response for n completions.
pub fn (c Client) generate_multiple(prompt string, n u8, config GenerationConfig) !http.Response {
    // n is a u8 and can never be negative, so check the lower bound against 1.
    if n < 1 || n > 10 {
        return error('n must be between 1 and 10')
    }
    config.verify()!
    return http.fetch(
        url: chatgpt.openai_api_url
        method: .post
        header: c.header
        data: json.encode(Body{
            prompt: prompt
            max_tokens: config.max_tokens
            stop: config.stop or { '' }
            n: n
            temperature: config.temperature
            top_p: config.top_p
            frequency_penalty: config.frequency_penalty
            presence_penalty: config.presence_penalty
            best_of: config.best_of
        })
    )!
}
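
A minimal usage sketch of the client above, assuming the package is imported as chatgpt and that the API key is exported in an OPENAI_TOKEN environment variable (the variable name is illustrative):

module main

import chatgpt
import os

fn main() {
    client := chatgpt.new_client(os.getenv('OPENAI_TOKEN'))
    // generate returns the raw http.Response; the completion JSON is in .body.
    response := client.generate('Write a haiku about V.') or {
        eprintln(err)
        return
    }
    println(response.body)
}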

src/config.v Normal file (+72)

@@ -0,0 +1,72 @@
module chatgpt

// GenerationConfig holds the options you can use to customize
// the response that will be generated by ChatGPT.
[params]
pub struct GenerationConfig {
pub mut:
    // The maximum number of tokens (common sequences of characters found in
    // text, roughly words) ChatGPT should generate in its response.
    max_tokens int = 256 // Min: 1, Max: 10240.
    // A sequence of characters that, when encountered by the model, will
    // cause it to stop generating text. By default it is unset, which means
    // that ChatGPT will not stop generating text until it reaches the
    // maximum number of tokens specified by max_tokens.
    stop ?string
    // The level of "creativity" or "randomness" to use when generating text.
    // A higher temperature will produce more varied and creative completions,
    // while a lower temperature will produce more predictable and repetitive
    // completions.
    temperature f32 // Min: 0, Max: 2.
    // The fraction of the probability mass of the distribution to keep when
    // selecting the next token. For example, if you set top_p to 0.5, ChatGPT
    // will only consider the most probable tokens that together make up 50%
    // of the total probability mass when generating text. This can be used
    // to produce more predictable and consistent completions, as the model
    // will only select tokens from the most likely options.
    top_p f32 = 1 // Min: 0, Max: 1.
    // A penalty applied to the log probability of each token based on how
    // often it has already been generated in the sequence. For example, if
    // you set frequency_penalty to 0.1, ChatGPT will penalize tokens that
    // have been generated more frequently, making them less likely to be
    // selected. This can be used to produce more diverse and interesting
    // completions, as the model will avoid repeating the same tokens over
    // and over.
    frequency_penalty f32 // Min: 0, Max: 1.
    // A penalty applied to the log probability of each token based on
    // whether it has already appeared in the text so far, regardless of how
    // often. For example, if you set presence_penalty to 0.1, ChatGPT will
    // penalize tokens that are already present in the text, making it more
    // likely to move on to new topics instead of repeating itself.
    presence_penalty f32 // Min: 0, Max: 1.
    // The number of completions to generate server-side for each prompt, of
    // which only the highest-scoring completion(s) are returned. For example,
    // if you set best_of to 3, the model will generate 3 completions and
    // return the best one(s). This can be useful if you want to ensure that
    // the model returns the best possible completion(s) for each prompt.
    best_of int = 1 // Min: 1, Max: 100.
}
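
Because GenerationConfig is a [params] struct, these options can be passed as trailing named arguments at the call site. A hedged sketch, reusing the client from the earlier example:

response := client.generate('Explain generics in V.',
    max_tokens: 512
    temperature: 0.7
    stop: '\n\n'
) or {
    eprintln(err)
    return
}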

// verify verifies that the GenerationConfig is valid.
fn (c GenerationConfig) verify() ! {
    if c.max_tokens < 1 || c.max_tokens > 10240 {
        return error('max_tokens must be between 1 and 10240')
    }
    if c.temperature < 0 || c.temperature > 2 {
        return error('temperature must be between 0 and 2')
    }
    if c.top_p < 0 || c.top_p > 1 {
        return error('top_p must be between 0 and 1')
    }
    if c.frequency_penalty < 0 || c.frequency_penalty > 1 {
        return error('frequency_penalty must be between 0 and 1')
    }
    if c.presence_penalty < 0 || c.presence_penalty > 1 {
        return error('presence_penalty must be between 0 and 1')
    }
    if c.best_of < 1 || c.best_of > 100 {
        return error('best_of must be between 1 and 100')
    }
}
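
For illustration, verify turns an out-of-range option into an error before any network request is made; a minimal in-module sketch:

config := GenerationConfig{
    temperature: 3.0 // Outside the allowed 0-2 range.
}
config.verify() or {
    eprintln(err) // Prints: temperature must be between 0 and 2
}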