@@ -1,16 +1,81 @@
#!/bin/bash

# System prompt prepended to every conversation: instructs the model to answer
# concisely, avoid "A:"/"Answer:" labels, and tells it today's date.
# NOTE: shell assignments must not have spaces around '='.
CHAT_INIT_PROMPT="You are ChatGPT, a Large Language Model trained by OpenAI. You will be answering questions from users. You answer as concisely as possible for each response (e.g. don’t be verbose). If you are generating a list, do not have too many items. Keep the number of items short. Before each user prompt you will be given the chat history in Q&A form. Output your answer directly, with no labels in front. Do not start your answers with A or Anwser. You were trained on data up until 2021. Today's date is $(date +%d/%m/%Y)"
# Print OpenAI API error details and abort the script if the response
# body contains an "error" object; otherwise do nothing.
# $1 - the raw JSON response body
# Reads: nothing else. Exits 1 on API error.
handle_error() {
    if echo "$1" | jq -e '.error' >/dev/null; then
        # show the error type in red, then the human-readable message
        echo -e "Your request to Open AI API failed: \033[0;31m$(echo "$1" | jq -r '.error.type')\033[0m"
        echo "$1" | jq -r '.error.message'
        exit 1
    fi
}
# Request to the OpenAI API completions endpoint.
# $1 - the request prompt (already escaped/flattened by the caller)
# Reads: $OPENAI_KEY, $MODEL, $MAX_TOKENS, $TEMPERATURE
# Sets:  $response - the raw JSON reply from the API
# NOTE(review): the JSON body is built by string interpolation, so the prompt
# must already have quotes/newlines escaped — confirmed by callers in this file.
request_to_completions() {
    request_prompt=$1
    response=$(curl https://api.openai.com/v1/completions \
        -sS \
        -H 'Content-Type: application/json' \
        -H "Authorization: Bearer $OPENAI_KEY" \
        -d '{
            "model": "'"$MODEL"'",
            "prompt": "'"${request_prompt}"'",
            "max_tokens": '$MAX_TOKENS',
            "temperature": '$TEMPERATURE'
        }')
}
# Request to the OpenAI API image generations endpoint.
# $1 - the full user prompt; the leading "image:" tag is stripped here
#      via ${prompt#*image:}
# Reads: $OPENAI_KEY, $SIZE
# Sets:  $image_response - the raw JSON reply from the API
request_to_image() {
    prompt=$1
    image_response=$(curl https://api.openai.com/v1/images/generations \
        -sS \
        -H 'Content-Type: application/json' \
        -H "Authorization: Bearer $OPENAI_KEY" \
        -d '{
            "prompt": "'"${prompt#*image:}"'",
            "n": 1,
            "size": "'"$SIZE"'"
        }')
}
# Build the chat context sent before each request.
# $1 - the current chat context (empty string on the first turn)
# $2 - the escaped user prompt
# Reads: $CHAT_INIT_PROMPT
# Sets:  $chat_context   - context with the new question appended
#        $request_prompt - same text with real newlines flattened to literal \n
#                          so it can be embedded in the JSON request body
build_chat_context() {
    chat_context=$1
    escaped_prompt=$2
    if [ -z "$chat_context" ]; then
        # first turn: seed the context with the init prompt
        chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_prompt"
    else
        chat_context="$chat_context\nQ: $escaped_prompt"
    fi
    request_prompt="${chat_context//$'\n'/\\n}"
}
# Maintain the chat context: append the answer, then trim the oldest Q&A
# pairs while the context exceeds the token budget.
# $1 - the current chat context
# $2 - the response data (text only)
# Reads: $CHAT_INIT_PROMPT, $MAX_TOKENS
# Sets:  $chat_context
maintain_chat_context() {
    chat_context=$1
    response_data=$2
    # add response to chat context as answer (real newlines flattened to \n)
    chat_context="$chat_context${chat_context:+\n}\nA: ${response_data//$'\n'/\\n}"
    # check prompt length, 1 word =~ 1.3 tokens; reserve 100 tokens for the
    # next user prompt. BUGFIX: the original wrote "*1,3" — in shell
    # arithmetic the comma operator discards the left operand, reducing the
    # whole condition to "3 > MAX_TOKENS-100", so the trim loop never ran.
    # "*13/10" is the intended 1.3 multiplier in integer arithmetic.
    while (($(echo "$chat_context" | wc -c) * 13 / 10 > (MAX_TOKENS - 100))); do
        # remove first/oldest QnA from prompt
        trimmed=$(echo "$chat_context" | sed -n '/Q:/,$p' | tail -n +2)
        # add init prompt so it is always on top
        new_context="$CHAT_INIT_PROMPT $trimmed"
        # stop if trimming makes no progress (nothing left to drop);
        # prevents an infinite loop when the init prompt alone is too long
        ((${#new_context} < ${#chat_context})) || break
        chat_context=$new_context
    done
}
# parse command line arguments
while [ [ " $# " -gt 0 ] ] ; do
case $1 in
@@ -69,16 +134,8 @@ while $running; do
if [ " $prompt " = = "exit" ] ; then
running = false
elif [ [ " $prompt " = ~ ^image: ] ] ; then
image_response = $( curl https://api.openai.com/v1/images/generations \
-sS \
-H 'Content-Type: application/json' \
-H " Authorization: Bearer $OPENAI_KEY " \
-d ' {
"prompt" : "'" ${ prompt #*image : } "'" ,
"n" : 1,
"size" : "'" $SIZE "'"
} ' )
handleError " $image_response "
request_to_image " $prompt "
handle_error " $image_response "
image_url = $( echo $image_response | jq -r '.data[0].url' )
echo -e " \n\033[36mchatgpt \033[0mYour image was created. \n\nLink: ${ image_url } \n "
@@ -99,16 +156,16 @@ while $running; do
models_response = $( curl https://api.openai.com/v1/models \
-sS \
-H " Authorization: Bearer $OPENAI_KEY " )
handleE rror " $models_response "
handle_e rror " $models_response "
models_data = $( echo $models_response | jq -r -C '.data[] | {id, owned_by, created}' )
echo -e " \n\033[36mchatgpt \033[0m This is a list of models currently available at OpenAI API:\n ${ models_data } "
elif [ [ " $prompt " = ~ ^model: ] ] ; then
models_response = $( curl https://api.openai.com/v1/models \
-sS \
-H " Authorization: Bearer $OPENAI_KEY " )
handleE rror " $models_response "
handle_e rror " $models_response "
model_data = $( echo $models_response | jq -r -C '.data[] | select(.id=="' " ${ prompt #*model : } " '")' )
echo -e " \n\033[36mchatgpt \033[0m Complete data for model: ${ prompt #*model : } \n ${ model_data } "
echo -e " \n\033[36mchatgpt \033[0m Complete details for model: ${ prompt #*model : } \n ${ model_data } "
else
# escape quotation marks
escaped_prompt = $( echo " $prompt " | sed 's/"/\\"/g' )
@@ -116,44 +173,19 @@ while $running; do
request_prompt = ${ escaped_prompt // $'\n' / ' ' }
if [ " $CONTEXT " = true ] ; then
# build chat context
if [ -z " $chat_context " ] ; then
chat_context = " $CHAT_INIT_PROMPT \nQ: $escaped_prompt "
else
chat_context = " $chat_context \nQ: $escaped_prompt "
fi
request_prompt = " ${ chat_context // $'\n' / \\ n } "
build_chat_context " $chat_context " " $escaped_prompt "
fi
# request to OpenAI API
response = $( curl https://api.openai.com/v1/completions \
-sS \
-H 'Content-Type: application/json' \
-H " Authorization: Bearer $OPENAI_KEY " \
-d ' {
"model" : "'" $MODEL "'" ,
"prompt" : "'" ${ request_prompt } "'" ,
"max_tokens" : '$MAX_TOKENS' ,
"temperature" : '$TEMPERATURE'
} ' )
handleError " $response "
request_to_completions " $request_prompt "
handle_error " $response "
response_data = $( echo $response | jq -r '.choices[].text' | sed '1,2d; s/^A://g' )
echo -e " \n\033[36mchatgpt \033[0m ${ response_data } "
if [ " $CONTEXT " = true ] ; then
# add response to chat context as answer
chat_context = " $chat_context ${ chat_context : + \n } \nA: ${ response_data // $'\n' / \\ n } "
# check prompt length, 1 word =~ 1.3 tokens
# reserving 100 tokens for next user prompt
while ( ( $( echo " $chat_context " | wc -c) *1,3 > ( MAX_TOKENS-100) ) ) ; do
# remove first/oldest QnA from prompt
chat_context = $( echo " $chat_context " | sed -n '/Q:/,$p' | tail -n +2)
# add init prompt so it is always on top
chat_context = " $CHAT_INIT_PROMPT $chat_context "
done
maintain_chat_context " $chat_context " " $response_data "
fi
timestamp = $( date +"%d/%m/%Y %H:%M" )
echo -e " $timestamp $prompt \n $response_data \n " >>~/.chatgpt_history
fi
done
done