
Support custom LLM implementation #803

Workflow file for this run

# runs on every commit/PR to verify that the binaries we build actually work, so build problems are caught early
name: Build and test HolmesGPT
on: [push, pull_request, workflow_dispatch]
jobs:
  build:
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Python dependencies and build
        # if you change something here, you must also change it in .github/workflows/build-binaries-and-brew.yaml
        run: |
          python -m pip install --upgrade pip setuptools pyinstaller
          curl -sSL https://install.python-poetry.org | python3 - --version 1.4.0
          poetry config virtualenvs.create false
          poetry install --no-root
          poetry run python -m playwright install --with-deps firefox
          sudo apt-get install -y binutils
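          # PyInstaller bundles holmes.py into a one-folder app under dist/holmes/.
          # The --add-data flags copy the runbook/prompt/toolset plugin files into that bundle,
          # and the tiktoken/litellm hidden imports and --collect-data are listed because those
          # packages load modules and data files dynamically, so PyInstaller cannot detect them on its own.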
          pyinstaller holmes.py --add-data 'holmes/plugins/runbooks/*:holmes/plugins/runbooks' --add-data 'holmes/plugins/prompts/*:holmes/plugins/prompts' --add-data 'holmes/plugins/toolsets/*:holmes/plugins/toolsets' --hidden-import=tiktoken_ext.openai_public --hidden-import=tiktoken_ext --hidden-import=litellm.llms.tokenizers --collect-data litellm
          ls dist
      - name: Run tests
        shell: bash
        env:
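          # presumably provided so tests that exercise the LLM can make real OpenAI calls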
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          poetry run pytest
      - name: Test the binary
        shell: bash
        run: |
          # GitHub's `shell: bash` runs with -e, so test the exit status directly
          # instead of checking $? afterwards (that branch would never be reached)
          if ! dist/holmes/holmes version; then
            echo "Binary test failed"
            exit 1
          fi
          echo "Binary test passed"