#!/bin/bash
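
# Setting GLOBIGNORE to "*" below makes pathname expansion ignore every glob
# match, so unquoted variables containing characters such as * are passed to
# the API literally instead of being expanded into file names.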
GLOBIGNORE="*"

CHAT_INIT_PROMPT="You are ChatGPT, a Large Language Model trained by OpenAI. You will be answering questions from users. You answer as concisely as possible for each response (e.g. don't be verbose). If you are generating a list, do not have too many items. Keep the number of items short. Before each user prompt you will be given the chat history in Q&A form. Output your answer directly, with no labels in front. Do not start your answers with A or Answer. You were trained on data up until 2021. Today's date is $(date +%d/%m/%Y)"

SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible. Current date: $(date +%d/%m/%Y). Knowledge cutoff: 9/1/2021."

COMMAND_GENERATION_PROMPT="Return a one-line bash command with the functionality I will describe. Return ONLY the command ready to run in the terminal. The command should do the following:"

CHATGPT_CYAN_LABEL="\n\033[36mchatgpt \033[0m"

PROCESSING_LABEL="\033[90mprocessing... \033[0m"

# error handling function
# $1 should be the response body
handle_error() {
    if echo "$1" | jq -e '.error' >/dev/null; then
        echo -e "Your request to the OpenAI API failed: \033[0;31m$(echo "$1" | jq -r '.error.type')\033[0m"
        echo "$1" | jq -r '.error.message'
        exit 1
    fi
}
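
# Illustrative example (comment only, not executed): an error payload that the
# jq filter above matches typically looks like the following; exact fields can
# vary by endpoint and error type:
#   {
#     "error": {
#       "message": "Incorrect API key provided: sk-...",
#       "type": "invalid_request_error",
#       "param": null,
#       "code": "invalid_api_key"
#     }
#   }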

# request to OpenAI API completions endpoint function
# $1 should be the request prompt
request_to_completions() {
    request_prompt="$1"

    response=$(curl https://api.openai.com/v1/completions \
        -sS \
        -H 'Content-Type: application/json' \
        -H "Authorization: Bearer $OPENAI_KEY" \
        -d '{
            "model": "'"$MODEL"'",
            "prompt": "'"${request_prompt}"'",
            "max_tokens": '$MAX_TOKENS',
            "temperature": '$TEMPERATURE'
        }')
}
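
# Illustrative example (comment only): with a non-chat model such as
# text-davinci-003 and the defaults set further below, the request body sent
# above is roughly:
#   {"model": "text-davinci-003", "prompt": "<your prompt>", "max_tokens": 1024, "temperature": 0.7}
# The generated text is expected under .choices[].text in the response, which
# is what the main loop extracts with jq.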

# request to OpenAI API image generations endpoint function
# $1 should be the prompt
request_to_image() {
    prompt="$1"
    image_response=$(curl https://api.openai.com/v1/images/generations \
        -sS \
        -H 'Content-Type: application/json' \
        -H "Authorization: Bearer $OPENAI_KEY" \
        -d '{
            "prompt": "'"${prompt#*image:}"'",
            "n": 1,
            "size": "'"$SIZE"'"
        }')
}
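
# Illustrative example (comment only): for the interactive prompt
#   image: a red panda eating bamboo
# the expansion ${prompt#*image:} strips everything up to and including
# "image:", so the JSON "prompt" field becomes " a red panda eating bamboo"
# (note that the leading space is kept).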

# request to OpenAI API chat completion endpoint function
# $1 should be the message(s) formatted with role and content
request_to_chat() {
    message="$1"
    response=$(curl https://api.openai.com/v1/chat/completions \
        -sS \
        -H 'Content-Type: application/json' \
        -H "Authorization: Bearer $OPENAI_KEY" \
        -d '{
            "model": "'"$MODEL"'",
            "messages": [
                {"role": "system", "content": "'"$SYSTEM_PROMPT"'"},
                '"$message"'
            ],
            "max_tokens": '$MAX_TOKENS',
            "temperature": '$TEMPERATURE'
        }')
}
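
# Illustrative example (comment only): for a two-turn conversation with the
# default settings the request body assembled above looks roughly like this
# ($SYSTEM_PROMPT and the accumulated user/assistant turns are substituted in):
#   {
#     "model": "gpt-3.5-turbo",
#     "messages": [
#       {"role": "system", "content": "You are ChatGPT, ..."},
#       {"role": "user", "content": "What does GLOBIGNORE do?"},
#       {"role": "assistant", "content": "It controls which glob matches are ignored."},
#       {"role": "user", "content": "Show me an example"}
#     ],
#     "max_tokens": 1024,
#     "temperature": 0.7
#   }
# The answer text is read from .choices[].message.content later in the script.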

# build chat context before each request for /completions (all models except
# gpt-3.5-turbo and gpt-4)
# $1 should be the chat context
# $2 should be the escaped prompt
build_chat_context() {
    chat_context="$1"
    escaped_prompt="$2"
    if [ -z "$chat_context" ]; then
        chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_prompt"
    else
        chat_context="$chat_context\nQ: $escaped_prompt"
    fi
    request_prompt="${chat_context//$'\n'/\\n}"
}
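
# Illustrative example (comment only): after two questions the prompt sent to
# /completions is a single string roughly of the form
#   <CHAT_INIT_PROMPT>\nQ: first question\nA: first answer\nQ: second question
# where each \n is a literal two-character escape inside the JSON string.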

# maintain chat context function for /completions (all models except
# gpt-3.5-turbo and gpt-4)
# builds chat context from response,
# keeps chat context length under max token limit
# $1 should be the chat context
# $2 should be the response data (only the text)
maintain_chat_context() {
    chat_context="$1"
    response_data="$2"
    # add response to chat context as answer
    chat_context="$chat_context${chat_context:+\n}\nA: ${response_data//$'\n'/\\n}"
    # rough length check (1 word =~ 1.3 tokens); the 1.3 factor is applied with
    # integer arithmetic, reserving 100 tokens for the next user prompt
    while (($(echo "$chat_context" | wc -c) * 13 / 10 > (MAX_TOKENS - 100))); do
        # remove first/oldest QnA from prompt
        chat_context=$(echo "$chat_context" | sed -n '/Q:/,$p' | tail -n +2)
        # add init prompt so it is always on top
        chat_context="$CHAT_INIT_PROMPT $chat_context"
    done
}

# build user chat message function for /chat/completions (gpt models)
# builds chat message before request,
# $1 should be the chat message
# $2 should be the escaped prompt
build_user_chat_message() {
    chat_message="$1"
    escaped_prompt="$2"
    if [ -z "$chat_message" ]; then
        chat_message="{\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
    else
        chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
    fi

    request_prompt="$chat_message"
}
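
# Illustrative example (comment only): after the first user prompt,
# chat_message / request_prompt holds a single JSON object such as
#   {"role": "user", "content": "How do I list only directories with ls?"}
# Later user and assistant turns are appended with ", " (see
# add_assistant_response_to_chat_message), so the string stays a valid
# comma-separated list for the "messages" array in request_to_chat.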

# adds the assistant response to the message in chat (role/content) format
# for /chat/completions (gpt models)
# keeps messages length under max token limit
# $1 should be the chat message
# $2 should be the response data (only the text)
add_assistant_response_to_chat_message() {
    chat_message="$1"
    local local_response_data="$2"

    # replace new line characters from response with space
    local_response_data=$(echo "$local_response_data" | tr '\n' ' ')
    # add response to chat context as answer
    chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$local_response_data\"}"

    # rough length check (1 word =~ 1.3 tokens); the 1.3 factor is applied with
    # integer arithmetic, reserving 100 tokens for the next user prompt
    while (($(echo "$chat_message" | wc -c) * 13 / 10 > (MAX_TOKENS - 100))); do
        # transform to a json array so jq can drop the oldest entries
        chat_message_json="[ $chat_message ]"
        # remove the first/oldest user/assistant pair and keep the rest as a
        # comma-separated list of {role, content} objects
        chat_message=$(echo "$chat_message_json" | jq -c '.[2:] | map({role, content})' | sed -e 's/^\[//' -e 's/\]$//')
    done
}
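
# Illustrative example (comment only): after one full exchange the accumulated
# chat_message string spliced into the "messages" array looks like
#   {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}
# and grows by one user/assistant pair per turn until the trimming above kicks in.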

# parse command line arguments
while [[ "$#" -gt 0 ]]; do
    case $1 in
        -i | --init-prompt)
            CHAT_INIT_PROMPT="$2"
            SYSTEM_PROMPT="$2"
            CONTEXT=true
            shift
            shift
            ;;
        --init-prompt-from-file)
            CHAT_INIT_PROMPT=$(cat "$2")
            SYSTEM_PROMPT=$(cat "$2")
            CONTEXT=true
            shift
            shift
            ;;
        -p | --prompt)
            prompt="$2"
            shift
            shift
            ;;
        --prompt-from-file)
            prompt=$(cat "$2")
            shift
            shift
            ;;
        -t | --temperature)
            TEMPERATURE="$2"
            shift
            shift
            ;;
        --max-tokens)
            MAX_TOKENS="$2"
            shift
            shift
            ;;
        -m | --model)
            MODEL="$2"
            shift
            shift
            ;;
        -s | --size)
            SIZE="$2"
            shift
            shift
            ;;
        -c | --chat-context)
            CONTEXT=true
            shift
            ;;
        *)
            echo "Unknown parameter: $1"
            exit 1
            ;;
    esac
done

# set defaults
TEMPERATURE=${TEMPERATURE:-0.7}
MAX_TOKENS=${MAX_TOKENS:-1024}
MODEL=${MODEL:-gpt-3.5-turbo}
SIZE=${SIZE:-512x512}
CONTEXT=${CONTEXT:-false}
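
# Illustrative usage (comment only; assumes the script is saved as chatgpt.sh
# and that OPENAI_KEY is exported, e.g. export OPENAI_KEY=sk-...):
#   ./chatgpt.sh                                  # interactive chat mode
#   ./chatgpt.sh -p "Explain what xargs does"     # one-shot prompt, then exit
#   echo "Summarize this: $(cat notes.txt)" | ./chatgpt.sh   # pipe mode
#   ./chatgpt.sh -m text-davinci-003 -c           # /completions model with chat context
#   ./chatgpt.sh -t 0.2 --max-tokens 256 -p "Write a haiku about bash"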

# create history file
if [ ! -f ~/.chatgpt_history ]; then
    touch ~/.chatgpt_history
    chmod 600 ~/.chatgpt_history
fi

running=true
# check input source and determine run mode

# prompt from argument, run on pipe mode (run once, no chat)
if [ -n "$prompt" ]; then
    pipe_mode_prompt=${prompt}
# if the input file descriptor is a terminal, run on chat mode
elif [ -t 0 ]; then
    echo -e "Welcome to chatgpt. You can quit with '\033[36mexit\033[0m' or '\033[36mq\033[0m'."
# prompt from pipe or redirected stdin, run on pipe mode
else
    pipe_mode_prompt+=$(cat -)
fi
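
# Illustrative summary (comment only) of the special prompts handled in the
# chat loop below; anything else is sent to the model:
#   exit / q          quit
#   image: <desc>     generate an image via /images/generations
#   history           print ~/.chatgpt_history
#   models            list the models available to your API key
#   model:<id>        show details for a single model
#   command: <task>   ask for a one-line shell command and offer to execute it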

while $running; do

    if [ -z "$pipe_mode_prompt" ]; then
        echo -e "\nEnter a prompt:"
        read -e prompt
        if [ "$prompt" != "exit" ] && [ "$prompt" != "q" ]; then
            echo -e "$PROCESSING_LABEL"
        fi
    else
        # set vars for pipe mode
        prompt=${pipe_mode_prompt}
        running=false
        CHATGPT_CYAN_LABEL=""
    fi

    if [ "$prompt" == "exit" ] || [ "$prompt" == "q" ]; then
        running=false
    elif [[ "$prompt" =~ ^image: ]]; then
        request_to_image "$prompt"
        handle_error "$image_response"
        image_url=$(echo "$image_response" | jq -r '.data[0].url')
        echo -e "${CHATGPT_CYAN_LABEL}Your image was created. \n\nLink: ${image_url}\n"

        if [[ "$TERM_PROGRAM" == "iTerm.app" ]]; then
            curl -sS "$image_url" -o temp_image.png
            imgcat temp_image.png
            rm temp_image.png
        else
            echo "Would you like to open it? (Yes/No)"
            read -e answer
            if [ "$answer" == "Yes" ] || [ "$answer" == "yes" ] || [ "$answer" == "y" ] || [ "$answer" == "Y" ] || [ "$answer" == "ok" ]; then
                open "${image_url}"
            fi
        fi
    elif [[ "$prompt" == "history" ]]; then
        echo -e "\n$(cat ~/.chatgpt_history)"
    elif [[ "$prompt" == "models" ]]; then
        models_response=$(curl https://api.openai.com/v1/models \
            -sS \
            -H "Authorization: Bearer $OPENAI_KEY")
        handle_error "$models_response"
        models_data=$(echo "$models_response" | jq -r -C '.data[] | {id, owned_by, created}')
        echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}"
    elif [[ "$prompt" =~ ^model: ]]; then
        models_response=$(curl https://api.openai.com/v1/models \
            -sS \
            -H "Authorization: Bearer $OPENAI_KEY")
        handle_error "$models_response"
        model_data=$(echo "$models_response" | jq -r -C '.data[] | select(.id=="'"${prompt#*model:}"'")')
        echo -e "${CHATGPT_CYAN_LABEL}Complete details for model: ${prompt#*model:}\n ${model_data}"
    elif [[ "$prompt" =~ ^command: ]]; then
        # escape quotation marks
        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
        # strip the "command:" prefix and escape new lines
        escaped_prompt=${escaped_prompt#command:}
        request_prompt=$COMMAND_GENERATION_PROMPT${escaped_prompt//$'\n'/' '}

        build_user_chat_message "$chat_message" "$request_prompt"
        request_to_chat "$request_prompt"
        handle_error "$response"
        response_data=$(echo "$response" | jq -r '.choices[].message.content')

        echo -e "${CHATGPT_CYAN_LABEL} ${response_data}" | fold -s -w "${COLUMNS:-80}"
        dangerous_commands=("rm" ">" "mv" "mkfs" ":(){:|:&};" "dd" "chmod" "wget" "curl")

        for dangerous_command in "${dangerous_commands[@]}"; do
            if [[ "$response_data" == *"$dangerous_command"* ]]; then
                echo "Warning! This command can change your file system or download external scripts & data. Please do not execute code that you don't understand completely."
            fi
        done

        echo "Would you like to execute it? (Yes/No)"
        read run_answer
        if [ "$run_answer" == "Yes" ] || [ "$run_answer" == "yes" ] || [ "$run_answer" == "y" ] || [ "$run_answer" == "Y" ]; then
            echo -e "\nExecuting command: $response_data\n"
            eval "$response_data"
        fi

        escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
        add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"

        timestamp=$(date +"%d/%m/%Y %H:%M")
        echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history

    elif [[ "$MODEL" =~ ^gpt- ]]; then
        # escape quotation marks
        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
        # escape new lines
        request_prompt=${escaped_prompt//$'\n'/' '}

        build_user_chat_message "$chat_message" "$request_prompt"
        request_to_chat "$request_prompt"
        handle_error "$response"
        response_data=$(echo "$response" | jq -r '.choices[].message.content')

        echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "${COLUMNS:-80}"

        escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
        add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"

        timestamp=$(date +"%d/%m/%Y %H:%M")
        echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history
    else
        # escape quotation marks
        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
        # escape new lines
        request_prompt=${escaped_prompt//$'\n'/' '}

        if [ "$CONTEXT" = true ]; then
            build_chat_context "$chat_context" "$escaped_prompt"
        fi

        request_to_completions "$request_prompt"
        handle_error "$response"
        response_data=$(echo "$response" | jq -r '.choices[].text' | sed '1,2d; s/^A://g')
        echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "${COLUMNS:-80}"

        if [ "$CONTEXT" = true ]; then
            escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
            maintain_chat_context "$chat_context" "$escaped_response_data"
        fi

        timestamp=$(date +"%d/%m/%Y %H:%M")
        echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history
    fi
done