chore: rename project to llmpa (LLM Personal Assistant/Agent)
nuffin committed Oct 18, 2024
1 parent 4b94d34 commit 89bb625
Showing 49 changed files with 39 additions and 38 deletions.
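
The rename spans configuration files, Docker assets, shell scripts, and the Python package directory itself (llmsearch/ → llmpa/). As a rough illustration only — this is not the script used for this commit, and the `git mv` of the package directory plus the blanket `sed` substitution are assumptions — such a rename could be scripted along these lines:

```sh
#!/usr/bin/env bash
# Hypothetical sketch of a project-wide rename (not taken from this commit).
set -euo pipefail

OLD=llmsearch
NEW=llmpa

# Rename the Python package directory; assumes it sits at the repository root.
git mv "${OLD}" "${NEW}"

# Rewrite every remaining tracked reference (llmsearch -> llmpa, llmsearchdb -> llmpadb, ...).
git grep -lz "${OLD}" | xargs -0 -r sed -i "s/${OLD}/${NEW}/g"
```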
2 changes: 1 addition & 1 deletion .env.develop
@@ -1,7 +1,7 @@
SERVER_NAME = LLM Search Server (develop)
HOST = 0.0.0.0
PORT = 58030
DATABASE_URI = postgresql+asyncpg://postgres:postgres@localhost:55432/llmsearchdb
DATABASE_URI = postgresql+asyncpg://postgres:postgres@localhost:55432/llmpadb
PUBLIC_PATH = public
UPLOADS_PATH = data/uploads
LOG_LEVEL = DEBUG
2 changes: 1 addition & 1 deletion .env.example
@@ -1,7 +1,7 @@
SERVER_NAME = LLM Search Server
HOST = 0.0.0.0
PORT = 58000
DATABASE_URI = postgresql+asyncpg://postgres:postgres@localhost:55432/llmsearchdb
DATABASE_URI = postgresql+asyncpg://postgres:postgres@localhost:55432/llmpadb
PUBLIC_PATH = public
UPLOADS_PATH = data/uploads
# SQLALCHEMY_ENGINE_POOL_SIZE = 10 # Pool size (number of connections to keep in the pool)
2 changes: 1 addition & 1 deletion .env.production
@@ -1,7 +1,7 @@
SERVER_NAME = LLM Search Server
HOST = 0.0.0.0
PORT = 58000
DATABASE_URI = postgresql+asyncpg://postgres:postgres@localhost:55432/llmsearchdb
DATABASE_URI = postgresql+asyncpg://postgres:postgres@localhost:55432/llmpadb
PUBLIC_PATH = public
UPLOADS_PATH = data/uploads
# SQLALCHEMY_ENGINE_POOL_SIZE = 10 # Pool size (number of connections to keep in the pool)
8 changes: 4 additions & 4 deletions README.md
@@ -21,20 +21,20 @@ By uploading documents in various file formats, re-training can be performed acc
#### With docker

```sh
docker compose -p llmsearch -f docker/docker-compose.yml up -d
docker compose -p llmpa -f docker/docker-compose.yml up -d
```

the default docker-compose.yml contains ollama and localai containers, in which the ollama image is built with `scripts/build-ollama-image.sh`

### Run the llmsearch engine
### Run the llmpa engine

#### With docker

There is already an LLM engine entry in docker/docker-compose.yml, or you can run it with the following command:

```sh
scripts/build-image.sh
docker run -d -p 58000:58000 --name llmsearch llmsearch
docker run -d -p 58000:58000 --name llmpa llmpa
```

#### Without docker
@@ -43,7 +43,7 @@ Assumed you are in the root directory of the project:

```sh
pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu124
python3 -m llmsearch
python3 -m llmpa
```

## TODO
2 changes: 1 addition & 1 deletion alembic.ini
@@ -61,7 +61,7 @@ version_path_separator = os # Use os.pathsep. Default configuration used for ne
# are written from script.py.mako
# output_encoding = utf-8

sqlalchemy.url = postgresql+asyncpg://postgres:postgres@localhost:55432/llmsearchdb
sqlalchemy.url = postgresql+asyncpg://postgres:postgres@localhost:55432/llmpadb


[post_write_hooks]
2 changes: 1 addition & 1 deletion alembic/env.py
@@ -3,7 +3,7 @@

# Add the src/ directory to the Python path
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "llmsearch"))
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "llmpa"))
)
from logging.config import fileConfig

4 changes: 2 additions & 2 deletions docker/Dockerfile
@@ -1,10 +1,10 @@
FROM llmsearch-base:latest
FROM llmpa-base:latest

WORKDIR /app

COPY docker/gunicorn.conf.py ./
COPY .env.production ./.env
COPY llmsearch/ .
COPY llmpa/ .
COPY public/ ./public

# Expose the port the app runs on (change if necessary)
43 changes: 22 additions & 21 deletions docker/docker-compose.yml
@@ -1,46 +1,46 @@
services:
llmsearch:
container_name: llmsearch-llmsearch
image: llmsearch:latest
llmpa:
container_name: llmpa-llmpa
image: llmpa:latest
ports:
- "58000:58000"
environment:
- FLASK_APP=llmsearch
- FLASK_APP=llmpa
- FLASK_ENV=development
- FLASK_RUN_HOST=

postgres:
container_name: llmsearch-postgres
container_name: llmpa-postgres
# image: postgres:17.0-bookworm
image: postgres-pgvector:latest
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=llmsearchdb
- POSTGRES_DB=llmpadb
# networks:
# - llmsearch
# - llmpa
volumes:
- pgdata:/var/lib/postgresql/data
ports:
- 55432:5432
restart: always

pgadmin:
container_name: llmsearch-pgadmin
container_name: llmpa-pgadmin
image: dpage/pgadmin4:8.12
environment:
- [email protected]
- PGADMIN_DEFAULT_PASSWORD=123456
# networks:
# - llmsearch
# - llmpa
volumes:
- pgadmin-data:/var/lib/pgadmin
ports:
- 55050:80
restart: always

elasticsearch:
container_name: llmsearch-elasticsearch
container_name: llmpa-elasticsearch
image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0
environment:
- discovery.type=single-node
@@ -52,12 +52,12 @@ services:
- "59200:9200"
- "59300:9300"
# networks:
# - llmsearch
# - llmpa
volumes:
- esdata:/usr/share/elasticsearch/data

localai:
container_name: llmsearch-localai
container_name: llmpa-localai
image: localai/localai:latest-aio-gpu-nvidia-cuda-12
ports:
- "58080:8080"
@@ -69,10 +69,10 @@ services:
devices:
- capabilities: [gpu]
# networks:
# - llmsearch
# - llmpa

ollama:
container_name: llmsearch-ollama
container_name: llmpa-ollama
image: ollama:latest
ports:
- "51400:51400"
@@ -86,10 +86,10 @@ services:
devices:
- capabilities: [gpu]
# networks:
# - llmsearch
# - llmpa

# opensearch-node1:
# container_name: llmsearch-opensearch-node1
# container_name: llmpa-opensearch-node1
# image: opensearchproject/opensearch:2.5.0 # You can choose the version you need
# environment:
# - cluster.name=opensearch-cluster
@@ -113,26 +113,26 @@ services:
# - "59200:9200" # OpenSearch REST API
# - "59600:9600" # Performance Analyzer
# # networks:
# # - llmsearch
# # - llmpa
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:9200"]
# interval: 30s
# timeout: 10s
# retries: 5

# opensearch-dashboards:
# container_name: llmsearch-opensearch-dashboards
# container_name: llmpa-opensearch-dashboards
# image: opensearchproject/opensearch-dashboards:2.5.0
# ports:
# - "55601:5601" # OpenSearch Dashboards
# environment:
# OPENSEARCH_HOSTS: '["http://opensearch-node1:9200"]'
# OPENSEARCH_SECURITY_ADMIN_PASSWORD: admin_password # Set the admin password here for dashboards
# # networks:
# # - llmsearch
# # - llmpa

# networks:
# llmsearch:
# llmpa:
# driver: bridge

volumes:
@@ -141,6 +141,7 @@ volumes:
esdata:
driver: local
opensearch-data:
local-ai-models:
global-local-ai-models:
external: true
ollama-models:

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion llmsearch/env.py → llmpa/env.py
@@ -9,7 +9,7 @@
"SERVER_NAME": "LLM Search Server",
"HOST": "0.0.0.0",
"PORT": 58000,
"DATABASE_URI": "postgresql+psycopg2://postgres:postgres@localhost:35432/llmsearch",
"DATABASE_URI": "postgresql+psycopg2://postgres:postgres@localhost:35432/llmpa",
"PUBLIC_PATH": "public",
"UPLOADS_PATH": "data/uploads",
"SQLALCHEMY_ENGINE_POOL_SIZE": 10, # Pool size (number of connections to keep in the pool)
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion scripts/build-base-image.sh
@@ -7,7 +7,7 @@ ROOTPATH=$(realpath ${FILEPATH}/..)

cd ${ROOTPATH}

IMAGE_NAME="llmsearch-base"
IMAGE_NAME="llmpa-base"

TAG="${IMAGE_NAME}:$(date +%m%d_%H%M)"

2 changes: 1 addition & 1 deletion scripts/build-image.sh
@@ -7,7 +7,7 @@ ROOTPATH=$(realpath ${FILEPATH}/..)

cd ${ROOTPATH}

IMAGE_NAME="llmsearch"
IMAGE_NAME="llmpa"

TAG="${IMAGE_NAME}:$(date +%m%d_%H%M)"

2 changes: 1 addition & 1 deletion start-containers.sh
@@ -1,3 +1,3 @@
#!/usr/bin/env bash

docker compose -p llmsearch -f docker/docker-compose.yml up -d
docker compose -p llmpa -f docker/docker-compose.yml up -d
2 changes: 1 addition & 1 deletion start.sh
@@ -2,4 +2,4 @@

ENVNAME=$1

env ENVIRONMENT=${ENVNAME} python3 src/app.py
env ENVIRONMENT=${ENVNAME} python3 -m llmpa
2 changes: 1 addition & 1 deletion web/package.json
@@ -1,5 +1,5 @@
{
"name": "llmsearch-web",
"name": "llmpa-web",
"version": "0.1.0",
"private": true,
"scripts": {
