OpenAI compatibility (#231)
* fix: ๐Ÿ› fix OPENAI key setting issue and update readme

* feat: 🎸 update gpt4o

* style: format code with Black

This commit fixes the style issues introduced in 99581a8 according to the output
from Black.

Details: #229

* fix: ๐Ÿ› fix OPENAI_KEY typo

* style: format code with Black

This commit fixes the style issues introduced in 8f9091c according to the output
from Black.

Details: #230

* feat: 🎸 update openai python sdk

---------

Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
GreyDGL and deepsource-autofix[bot] authored May 15, 2024
1 parent f072f90 commit bb768c1
Showing 6 changed files with 27 additions and 18 deletions.
2 changes: 1 addition & 1 deletion pentestgpt/_version.py
@@ -1 +1 @@
-__version__ = '"0.13.1"'
+__version__ = '"0.13.3"'
27 changes: 15 additions & 12 deletions pentestgpt/utils/APIs/chatgpt_api.py
@@ -9,6 +9,7 @@
 import openai
 import tiktoken
 from langfuse.model import InitialGeneration, Usage
+from openai import OpenAI
 from tenacity import *
 
 from pentestgpt.utils.llm_api import LLMAPI
@@ -46,6 +47,8 @@ def __eq__(self, other):
 class ChatGPTAPI(LLMAPI):
     def __init__(self, config_class, use_langfuse_logging=False):
         self.name = str(config_class.model)
+        api_key = os.getenv("OPENAI_API_KEY", None)
+        self.client = OpenAI(api_key=api_key, base_url=config_class.api_base)
 
         if use_langfuse_logging:
             # use langfuse.openai to shadow the default openai library
@@ -58,9 +61,7 @@ def __init__(self, config_class, use_langfuse_logging=False):
             from langfuse import Langfuse
 
             self.langfuse = Langfuse()
-
-        openai.api_key = os.getenv("OPENAI_API_KEY", None)
-        openai.api_base = config_class.api_base
+
         self.model = config_class.model
         self.log_dir = config_class.log_dir
         self.history_length = 5  # maintain 5 messages in the history. (5 chat memory)
@@ -69,7 +70,9 @@
 
         logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")
 
-    def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
+    def _chat_completion(
+        self, history: List, model=None, temperature=0.5, image_url: str = None
+    ) -> str:
         generationStartTime = datetime.now()
         # use model if provided, otherwise use self.model; if self.model is None, use gpt-4-1106-preview
         if model is None:
@@ -78,12 +81,12 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
         else:
             model = self.model
         try:
-            response = openai.ChatCompletion.create(
+            response = self.client.chat.completions.create(
                 model=model,
                 messages=history,
                 temperature=temperature,
             )
-        except openai.error.APIConnectionError as e: # give one more try
+        except openai._exceptions.APIConnectionError as e: # give one more try
             logger.warning(
                 "API Connection Error. Waiting for {} seconds".format(
                     self.error_wait_time
@@ -96,7 +99,7 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
                 messages=history,
                 temperature=temperature,
             )
-        except openai.error.RateLimitError as e: # give one more try
+        except openai._exceptions.RateLimitError as e: # give one more try
             logger.warning("Rate limit reached. Waiting for 5 seconds")
             logger.error("Rate Limit Error: ", e)
             time.sleep(5)
@@ -105,7 +108,7 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
                 messages=history,
                 temperature=temperature,
             )
-        except openai.error.InvalidRequestError as e: # token limit reached
+        except openai._exceptions.RateLimitError as e: # token limit reached
             logger.warning("Token size limit reached. The recent message is compressed")
             logger.error("Token size error; will retry with compressed message ", e)
             # compress the message in two ways.
@@ -151,14 +154,14 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
                     model=self.model,
                     modelParameters={"temperature": str(temperature)},
                     prompt=history,
-                    completion=response["choices"][0]["message"]["content"],
+                    completion=response.choices[0].message.content,
                     usage=Usage(
-                        promptTokens=response["usage"]["prompt_tokens"],
-                        completionTokens=response["usage"]["completion_tokens"],
+                        promptTokens=response.usage.prompt_tokens,
+                        completionTokens=response.usage.completion_tokens,
                     ),
                 )
             )
-        return response["choices"][0]["message"]["content"]
+        return response.choices[0].message.content
 
 
 if __name__ == "__main__":
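Note: the substance of this file's change is the migration from the 0.x module-level API (openai.ChatCompletion.create, openai.error.*) to the 1.x client object created in __init__, with attribute access replacing dictionary lookups on the response. The following is a minimal standalone sketch of that 1.x pattern, not code from the repository; the model name and message are placeholders, and OPENAI_API_KEY is assumed to be set in the environment.

# Sketch (not from the repository) of the openai>=1.x usage pattern adopted here:
# an explicit client object, chat.completions.create, attribute access on the
# response, and top-level exception classes instead of openai.error.*.
import os

from openai import OpenAI, APIConnectionError, RateLimitError

# base_url is optional; the commit passes config_class.api_base so that
# OpenAI-compatible endpoints can also be targeted.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

history = [{"role": "user", "content": "Say hello."}]

try:
    response = client.chat.completions.create(
        model="gpt-4o",          # placeholder model name
        messages=history,
        temperature=0.5,
    )
    print(response.choices[0].message.content)   # typed attributes, not dicts
    print(response.usage.prompt_tokens, response.usage.completion_tokens)
except (APIConnectionError, RateLimitError) as exc:
    # The patched code catches these individually and retries once.
    print(f"OpenAI request failed: {exc}")

Because responses are typed objects in the 1.x SDK, the diff above replaces lookups such as response["choices"][0]["message"]["content"] with response.choices[0].message.content.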
7 changes: 6 additions & 1 deletion pentestgpt/utils/APIs/chatgpt_vision_api.py
@@ -69,7 +69,12 @@ def __init__(self, config_class, use_langfuse_logging=False):
 
         logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")
 
-    def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
+    def _chat_completion(
+        self,
+        history: List,
+        model=None,
+        temperature=0.5,
+    ) -> str:
         generationStartTime = datetime.now()
         # use model if provided, otherwise use self.model; if self.model is None, use gpt-4-1106-preview
         if model is None:
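The vision API change here only reformats the signature; an image_url parameter is added to the signature in chatgpt_api.py but is not wired up in this diff. For orientation, a hedged sketch of how an image URL is typically attached to a chat request with the 1.x SDK; the URL and model name below are illustrative placeholders, not values from this commit.

# Sketch only: typical openai>=1.x request against a vision-capable model.
import os

from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

response = client.chat.completions.create(
    model="gpt-4o",  # placeholder vision-capable model
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this screenshot."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/screenshot.png"},
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)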
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pentestgpt"
-version = "0.13.1"
+version = "0.13.3"
 description = "PentestGPT is an LLM-powered penetration testing tool."
 authors = ["Gelei Deng <[email protected]>"]
 license = "MIT"
@@ -19,7 +19,7 @@ rich = "^13.7.1"
 prompt-toolkit = "^3.0.43"
 google = "^3.0.0"
 pytest = "^8.1.1"
-openai = ">=0.27.8,<0.28.0"
+openai = "^1.29.0"
 langchain = "^0.1.13"
 tiktoken = "^0.6.0"
 pycookiecheat = "^0.6.0"
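With the constraint moving from openai >=0.27.8,<0.28.0 to ^1.29.0, an environment that still has the 0.x SDK installed will fail at the new client calls. A small defensive check, offered only as a sketch and not part of this commit (packaging is already pinned in requirements.txt):

# Sketch (not in the repository): guard against a stale 0.x openai install,
# since the 1.x client interface used above does not exist there.
import openai
from packaging.version import Version

if Version(openai.__version__) < Version("1.0.0"):
    raise RuntimeError(
        f"openai=={openai.__version__} found; this code expects the 1.x SDK "
        '(pyproject pins openai = "^1.29.0").'
    )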
3 changes: 2 additions & 1 deletion requirements.txt
@@ -16,6 +16,7 @@ colorama==0.4.6 ; python_version >= "3.10" and python_version < "4.0"
 cryptography==41.0.4 ; python_version >= "3.10" and python_version < "4.0"
 cssselect==1.2.0 ; python_version >= "3.10" and python_version < "4.0"
 dataclasses-json==0.6.6 ; python_version >= "3.10" and python_version < "4.0"
+distro==1.9.0 ; python_version >= "3.10" and python_version < "4.0"
 exceptiongroup==1.2.1 ; python_version >= "3.10" and python_version < "3.11"
 feedfinder2==0.0.4 ; python_version >= "3.10" and python_version < "4.0"
 feedparser==6.0.11 ; python_version >= "3.10" and python_version < "4.0"
@@ -64,7 +65,7 @@ mypy-extensions==1.0.0 ; python_version >= "3.10" and python_version < "4.0"
 newspaper3k==0.2.8 ; python_version >= "3.10" and python_version < "4.0"
 nltk==3.8.1 ; python_version >= "3.10" and python_version < "4.0"
 numpy==1.26.4 ; python_version >= "3.10" and python_version < "4.0"
-openai==0.27.10 ; python_version >= "3.10" and python_version < "4.0"
+openai==1.29.0 ; python_version >= "3.10" and python_version < "4.0"
 orjson==3.10.3 ; python_version >= "3.10" and python_version < "4.0"
 packaging==23.2 ; python_version >= "3.10" and python_version < "4.0"
 pathspec==0.12.1 ; python_version >= "3.10" and python_version < "4.0"
2 changes: 1 addition & 1 deletion setup.py
@@ -7,7 +7,7 @@
 
 setup(
     name="pentestgpt",
-    version="0.13.1",
+    version="0.13.3",
     description="PentestGPT, a GPT-empowered penetration testing tool",
     long_description="""
 PentestGPT is a penetration testing tool empowered by ChatGPT.
