diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml new file mode 100644 index 00000000..0802be96 --- /dev/null +++ b/.github/workflows/backend.yml @@ -0,0 +1,208 @@ +name: Backend-CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +permissions: + contents: read + pull-requests: write + +jobs: + build: + name: Build + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Setup pnpm + uses: pnpm/action-setup@v3 + with: + version: 9.10.0 + + - name: Get pnpm store directory + id: pnpm-cache + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT + + - name: Setup pnpm cache + uses: actions/cache@v4 + with: + path: ${{ steps.pnpm-cache.outputs.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('pnpm-lock.yaml') || github.sha }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install + + - name: Install turbo + run: pnpm add turbo@latest -g + + - name: Build + run: turbo build + + - name: Create config file + run: | + mkdir -p packages/agents + cat > packages/agents/config.toml << 'EOL' + [API_KEYS] + OPENAI = "${{ secrets.OPENAI }}" + ANTHROPIC = "${{ secrets.ANTHROPIC }}" + GEMINI = "${{ secrets.GEMINI }}" + + [VECTOR_DB] + POSTGRES_USER = "${{ secrets.POSTGRES_USER }}" + POSTGRES_HOST = "postgres" + POSTGRES_ROOT_DB = "${{ secrets.POSTGRES_ROOT_DB }}" + POSTGRES_PASSWORD = "${{ secrets.POSTGRES_PASSWORD }}" + POSTGRES_PORT = "${{ secrets.POSTGRES_PORT }}" + + [GENERAL] + PORT = 3001 + SIMILARITY_MEASURE = "cosine" + + [HOSTED_MODE] + DEFAULT_CHAT_PROVIDER = "gemini" + DEFAULT_CHAT_MODEL = "Gemini Flash 2.5" + DEFAULT_FAST_CHAT_PROVIDER = "gemini" + DEFAULT_FAST_CHAT_MODEL = "Gemini Flash 2.5" + DEFAULT_EMBEDDING_PROVIDER = "openai" + DEFAULT_EMBEDDING_MODEL = "Text embedding 3 large" + + [VERSIONS] + STARKNET_FOUNDRY = "0.37.0" + SCARB = "2.9.2" + EOL + + - name: Create env file + run: | + cat > .env << 'EOL' + POSTGRES_USER = "${{ secrets.POSTGRES_USER }}" + POSTGRES_HOST = "localhost" + POSTGRES_ROOT_DB = "${{ secrets.POSTGRES_ROOT_DB }}" + POSTGRES_PASSWORD = "${{ secrets.POSTGRES_PASSWORD }}" + POSTGRES_PORT = "${{ secrets.POSTGRES_PORT }}" + EOL + + - name: Create backend env file + run: | + touch packages/backend/.env + + - name: Run unit tests + run: pnpm run test:unit + + - name: Build docker image + run: docker build -t cairo-coder-backend:${{ github.sha }} -f backend.dockerfile . + + - name: Run backend and database integration tests + run: | + docker compose up -d postgres backend + echo "Waiting for services to be ready..." + sleep 20 + + chmod +x ./scripts/integration-tests.sh + chmod +x ./scripts/database-connection.sh + + echo -e "\n=== Running basic integration tests ===" + ./scripts/integration-tests.sh + INTEGRATION_RESULT=$? + + echo -e "\n=== Running database connection test via chat/completions endpoint ===" + ./scripts/database-connection.sh + DB_CONNECTION_RESULT=$? + + if [ $INTEGRATION_RESULT -ne 0 ] || [ $DB_CONNECTION_RESULT -ne 0 ]; then + echo "❌ Integration tests failed!" + exit 1 + else + echo "✅ All integration tests passed!" 
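+ # Both exit codes are captured above so the job can report a combined result; note that GitHub's default bash -e shell may still abort on the first failing script unless the step's shell is overridden.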
+ fi + + - name: Build ingester image + run: docker compose --profile ingester build ingester + + - name: Run data ingestion + run: docker compose --profile ingester up -d ingester + + - name: Import snak repository + run: | + mkdir -p ./snak + git clone https://github.com/KasarLabs/snak ./snak + cd ./snak + git checkout fix/server-for-reuse + + - name: Create snak env file + run: | + cd ./snak + cat > .env << 'EOL' + + STARKNET_PUBLIC_ADDRESS="${{ secrets.STARKNET_PUBLIC_ADDRESS }}" + STARKNET_PRIVATE_KEY="${{ secrets.STARKNET_PRIVATE_KEY }}" + STARKNET_RPC_URL="${{ secrets.STARKNET_RPC_URL }}" + + AI_PROVIDER_API_KEY="${{ secrets.ANTHROPIC }}" + AI_MODEL="claude-3-5-sonnet-latest" + AI_PROVIDER="anthropic" + + NODE_ENV="development" + + SERVER_API_KEY="${{ secrets.SNAK_SERVER_KEY }}" + SERVER_PORT="${{ secrets.SNAK_SERVER_PORT }}" + + POSTGRES_USER="${{ secrets.POSTGRES_USER }}" + POSTGRES_PASSWORD="${{ secrets.POSTGRES_PASSWORD }}" + POSTGRES_ROOT_DB="${{ secrets.POSTGRES_ROOT_DB }}" + POSTGRES_HOST="localhost" + POSTGRES_PORT="${{ secrets.POSTGRES_PORT }}" + + CAIRO_UPLOAD_DIR="plugins/cairocoder/uploads/" + CAIRO_GENERATION_API_URL="http://127.0.0.1:3001/chat/completions" + + EOL + + - name: Cache snak node modules + uses: actions/cache@v4 + with: + path: ./snak/node_modules + key: ${{ runner.os }}-snak-modules-${{ hashFiles('./snak/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-snak-modules- + + - name: Install snak dependencies + run: | + cd ./snak + pnpm install --filter="@snakagent/server..." + + - name: Start snak server + run: | + cd ./snak + pnpm run build --filter="@snakagent/server..." && lerna run start --scope "@snakagent/server" & + + echo "Waiting for server to start..." + sleep 120 + + - name: Create cairo code generation test env file + run: | + cd ./packages/agents + cat > .env.test << 'EOL' + + API_KEY="${{ secrets.SNAK_SERVER_KEY }}" + API_URL="http://localhost:${{ secrets.SNAK_SERVER_PORT }}" + + EOL + + - name: Run cairo code generation test + run: | + chmod +x ./scripts/snak-test.sh + bash scripts/snak-test.sh diff --git a/.github/workflows/generate-embeddings.yml b/.github/workflows/ingester.yml similarity index 60% rename from .github/workflows/generate-embeddings.yml rename to .github/workflows/ingester.yml index d6f66cd0..1c534139 100644 --- a/.github/workflows/generate-embeddings.yml +++ b/.github/workflows/ingester.yml @@ -40,7 +40,7 @@ jobs: uses: actions/cache@v4 with: path: ${{ steps.pnpm-cache.outputs.STORE_PATH }} - key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('pnpm-lock.yaml') || github.sha }} restore-keys: | ${{ runner.os }}-pnpm-store- @@ -69,35 +69,43 @@ jobs: mkdir -p packages/agents cat > packages/agents/config.toml << 'EOL' [API_KEYS] - OPENAI = "${{ secrets.OPENAI_API_KEY }}" - GROQ = "${{ secrets.GROQ_API_KEY }}" - ANTHROPIC = "${{ secrets.ANTHROPIC_API_KEY }}" - DEEPSEEK = "${{ secrets.DEEPSEEK_API_KEY }}" - GEMINI = "${{ secrets.GEMINI_API_KEY }}" - - [API_ENDPOINTS] - OLLAMA = "${{ secrets.OLLAMA_ENDPOINT }}" + OPENAI = "${{ secrets.OPENAI }}" + ANTHROPIC = "${{ secrets.ANTHROPIC }}" + GEMINI = "${{ secrets.GEMINI }}" [VECTOR_DB] - MONGODB_URI = "${{ secrets.MONGODB_URI }}" - DB_NAME = "${{ secrets.DB_NAME }}" - COLLECTION_NAME = "${{ secrets.COLLECTION_NAME }}" + POSTGRES_USER = "${{ secrets.POSTGRES_USER }}" + POSTGRES_HOST = "postgres" + POSTGRES_ROOT_DB = "${{ secrets.POSTGRES_ROOT_DB }}" + POSTGRES_PASSWORD = "${{ secrets.POSTGRES_PASSWORD }}" + 
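# Host "postgres" assumes the database is reachable under that service name on the compose network. +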
POSTGRES_PORT = "${{ secrets.POSTGRES_PORT }}" [GENERAL] PORT = 3001 SIMILARITY_MEASURE = "cosine" [HOSTED_MODE] - DEFAULT_CHAT_PROVIDER = "${{ secrets.DEFAULT_CHAT_PROVIDER }}" - DEFAULT_CHAT_MODEL = "${{ secrets.DEFAULT_CHAT_MODEL }}" - DEFAULT_FAST_CHAT_PROVIDER = "${{ secrets.DEFAULT_FAST_CHAT_PROVIDER }}" - DEFAULT_FAST_CHAT_MODEL = "${{ secrets.DEFAULT_FAST_CHAT_MODEL }}" - DEFAULT_EMBEDDING_PROVIDER = "${{ secrets.DEFAULT_EMBEDDING_PROVIDER }}" - DEFAULT_EMBEDDING_MODEL = "${{ secrets.DEFAULT_EMBEDDING_MODEL }}" + DEFAULT_CHAT_PROVIDER = "gemini" + DEFAULT_CHAT_MODEL = "Gemini Flash 2.5" + DEFAULT_FAST_CHAT_PROVIDER = "gemini" + DEFAULT_FAST_CHAT_MODEL = "Gemini Flash 2.5" + DEFAULT_EMBEDDING_PROVIDER = "openai" + DEFAULT_EMBEDDING_MODEL = "Text embedding 3 large" [VERSIONS] - STARKNET_FOUNDRY = "${{ secrets.STARKNET_FOUNDRY_VERSION }}" - SCARB = "${{ secrets.SCARB_VERSION }}" + STARKNET_FOUNDRY = "0.37.0" + SCARB = "2.9.2" + EOL + + - name: Create env file + run: | + mkdir -p packages/agents + cat > packages/agents/.env << 'EOL' + POSTGRES_USER = "${{ secrets.POSTGRES_USER }}" + POSTGRES_HOST = "localhost" + POSTGRES_ROOT_DB = "${{ secrets.POSTGRES_ROOT_DB }}" + POSTGRES_PASSWORD = "${{ secrets.POSTGRES_PASSWORD }}" + POSTGRES_PORT = "${{ secrets.POSTGRES_PORT }}" EOL - name: Generate embeddings diff --git a/.github/workflows/task-build-image.yml b/.github/workflows/task-build-image.yml new file mode 100644 index 00000000..82f3f039 --- /dev/null +++ b/.github/workflows/task-build-image.yml @@ -0,0 +1,64 @@ +name: Create and publish a Docker image + +on: + workflow_dispatch: + workflow_call: + inputs: + registry: + description: Container registry domain + required: true + type: string + image-name: + description: Name for the Docker image + required: true + type: string + image-file: + description: Dockerfile used to build the image + required: true + type: string + +permissions: + contents: read + packages: write + attestations: write + id-token: write + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Extract metadata + id: meta-backend + uses: docker/metadata-action@v5 + with: + images: ${{ inputs.registry }}/${{ inputs.image-name }}/${{ inputs.image-file }} + tags: | + type=ref,event=branch + type=sha,format=short + type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }} + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ inputs.registry }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + id: push-image + uses: docker/build-push-action@v6 + with: + context: . 
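+ # The repository root is the build context so the Dockerfile can copy the pnpm workspace files; the Dockerfile path is derived from the image-file input below.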
+ push: true + file: ./${{ inputs.image-file }}.dockerfile + tags: ${{ steps.meta-backend.outputs.tags }} + labels: ${{ steps.meta-backend.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml new file mode 100644 index 00000000..1fd0b49b --- /dev/null +++ b/.github/workflows/test-build.yml @@ -0,0 +1,78 @@ +name: Create and publish a Docker image + +on: + workflow_dispatch: + pull_request: + branches: [main] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + + permissions: + contents: read + packages: write + attestations: write + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Extract metadata for backend + id: meta-backend + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ github.repository }}/backend + tags: | + type=ref,event=branch + type=sha,format=short + type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }} + + - name: Extract metadata for ingester + id: meta-ingester + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ github.repository }}/ingester + tags: | + type=ref,event=branch + type=sha,format=short + type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }} + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image for backend + id: push-backend + uses: docker/build-push-action@v6 + with: + context: . + push: true + file: ./backend.dockerfile + tags: ${{ steps.meta-backend.outputs.tags }} + labels: ${{ steps.meta-backend.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build and push Docker image for ingester + id: push-ingester + uses: docker/build-push-action@v6 + with: + context: . 
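+ # Same root build context as the backend image; BuildKit layers can be reused between the two builds through the gha cache configured below.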
+ push: true + file: ./ingester.dockerfile + tags: ${{ steps.meta-ingester.outputs.tags }} + labels: ${{ steps.meta-ingester.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/trunk-check.yaml b/.github/workflows/trunk-check.yaml new file mode 100644 index 00000000..322ca59a --- /dev/null +++ b/.github/workflows/trunk-check.yaml @@ -0,0 +1,22 @@ +name: Pull Request +on: [pull_request] +concurrency: + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: read-all + +jobs: + trunk_check: + name: Trunk Code Quality Runner + runs-on: ubuntu-latest + permissions: + checks: write # For trunk to post annotations + contents: read # For repo checkout + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Trunk Code Quality + uses: trunk-io/trunk-action@v1 diff --git a/.gitignore b/.gitignore index 8273d3d8..586a656b 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,7 @@ yarn-error.log .env.test.local .env.production.local -packages/ui/.env +packages/agents/.env.test # Config files config.toml @@ -43,3 +43,7 @@ packages/**/node_modules packages/**/dist /data +.secrets +.actrc + +.pnpm-store/ diff --git a/.trunk/.gitignore b/.trunk/.gitignore new file mode 100644 index 00000000..10fcf035 --- /dev/null +++ b/.trunk/.gitignore @@ -0,0 +1,12 @@ +*out +*logs +*actions +*notifications +*tools +plugins +user_trunk.yaml +user.yaml +tmp + +!.trunk/trunk.yaml +!.trunk/configs \ No newline at end of file diff --git a/.trunk/configs/.hadolint.yaml b/.trunk/configs/.hadolint.yaml new file mode 100644 index 00000000..98bf0cd2 --- /dev/null +++ b/.trunk/configs/.hadolint.yaml @@ -0,0 +1,4 @@ +# Following source doesn't work in most setups +ignored: + - SC1090 + - SC1091 diff --git a/.trunk/configs/.markdownlint.yaml b/.trunk/configs/.markdownlint.yaml new file mode 100644 index 00000000..b40ee9d7 --- /dev/null +++ b/.trunk/configs/.markdownlint.yaml @@ -0,0 +1,2 @@ +# Prettier friendly markdownlint config (all formatting rules disabled) +extends: markdownlint/style/prettier diff --git a/.trunk/configs/.yamllint.yaml b/.trunk/configs/.yamllint.yaml new file mode 100644 index 00000000..841de67e --- /dev/null +++ b/.trunk/configs/.yamllint.yaml @@ -0,0 +1,5 @@ +rules: + quoted-strings: disable + key-duplicates: {} + octal-values: + forbid-implicit-octal: true diff --git a/.trunk/configs/svgo.config.mjs b/.trunk/configs/svgo.config.mjs new file mode 100644 index 00000000..b86ef082 --- /dev/null +++ b/.trunk/configs/svgo.config.mjs @@ -0,0 +1,14 @@ +export default { + plugins: [ + { + name: 'preset-default', + params: { + overrides: { + removeViewBox: false, // https://github.com/svg/svgo/issues/1128 + sortAttrs: true, + removeOffCanvasPaths: true, + }, + }, + }, + ], +}; diff --git a/.trunk/trunk.yaml b/.trunk/trunk.yaml new file mode 100644 index 00000000..a9c87935 --- /dev/null +++ b/.trunk/trunk.yaml @@ -0,0 +1,40 @@ +# This file controls the behavior of Trunk: https://docs.trunk.io/cli +# To learn more about the format of this file, see https://docs.trunk.io/reference/trunk-yaml +version: 0.1 +cli: + version: 1.22.10 +# Trunk provides extensibility via plugins. (https://docs.trunk.io/plugins) +plugins: + sources: + - id: trunk + ref: v1.6.7 + uri: https://github.com/trunk-io/plugins +# Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes) +runtimes: + enabled: + - node@18.20.5 + - python@3.10.8 +# This is the section where you manage your linters. 
(https://docs.trunk.io/check/configuration) +lint: + ignore: + - linters: [ALL] + paths: + - '*.dockerfile' + enabled: + - checkov@3.2.370 + - git-diff-check + - hadolint@2.12.1-beta + - markdownlint@0.44.0 + - osv-scanner@1.9.2 + - oxipng@9.1.3 + - prettier@3.5.1 + - svgo@3.3.2 + - taplo@0.9.3 + - trufflehog@3.88.8 + - yamllint@1.35.1 +actions: + enabled: + - trunk-announce + - trunk-check-pre-push + - trunk-fmt-pre-commit + - trunk-upgrade-available diff --git a/backend.dockerfile b/backend.dockerfile index 1f90a220..adc71b0f 100644 --- a/backend.dockerfile +++ b/backend.dockerfile @@ -2,6 +2,9 @@ FROM node:23.7-bullseye-slim WORKDIR /app +# Install ping to test network connectivity +RUN apt-get update && apt-get install -y iputils-ping && rm -rf /var/lib/apt/lists/* + # Copy root workspace files COPY pnpm-workspace.yaml ./ COPY package.json ./ diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 00000000..d699750d --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,53 @@ +services: + cairo-coder-postgres: + image: pgvector/pgvector:pg17 + container_name: 'cairo-coder-postgres' + shm_size: 1g + env_file: + - .env + expose: + - 5432 + volumes: + - postgres_data:/var/lib/postgresql/data + restart: unless-stopped + networks: + - cairo_coder + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U yo -d cairo_coder_db'] + interval: 5s + timeout: 5s + retries: 5 + start_period: 10s + + cairo-coder-backend: + container_name: 'cairo-coder-backend' + build: + context: . + dockerfile: backend.dockerfile + ports: + - 3001:3001 + env_file: + - packages/backend/.env + depends_on: + cairo-coder-postgres: + condition: service_healthy + restart: unless-stopped + networks: + - cairo_coder + + cairo-coder-ingester: + build: + context: .
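+ # Only started when the "ingester" profile is enabled, e.g. docker compose --profile ingester up.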
+ dockerfile: ingester.dockerfile + profiles: ['ingester'] + depends_on: + cairo-coder-postgres: + condition: service_healthy + networks: + - cairo_coder + +networks: + cairo_coder: + +volumes: + postgres_data: diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml new file mode 100644 index 00000000..4cce8797 --- /dev/null +++ b/docker-compose.prod.yml @@ -0,0 +1,66 @@ +services: + cairo-coder-postgres: + image: pgvector/pgvector:pg17 + container_name: cairo-coder-postgres + shm_size: 1g + env_file: + - ./config/.env + volumes: + - postgres_data:/var/lib/postgresql/data + restart: unless-stopped + networks: + - cairo_coder + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U yo -d cairo_coder_db'] + interval: 5s + timeout: 5s + retries: 5 + start_period: 10s + + cairo-coder-backend: + image: ghcr.io/kasarlabs/cairo-coder/backend:sha-9822262 + container_name: cairo-coder-backend + depends_on: + cairo-coder-postgres: + condition: service_healthy + volumes: + - ./config/config.toml:/app/packages/agents/config.toml + restart: unless-stopped + networks: + - cairo_coder + - services + deploy: + resources: + limits: + memory: 4G + labels: + - 'traefik.enable=true' + - 'traefik.docker.network=services' + - 'traefik.http.routers.cairo-coder-backend.rule=Host(`cairo-coder.kasar.io`)' + - 'traefik.http.routers.cairo-coder-backend.entrypoints=websecure' + - 'traefik.http.routers.cairo-coder-backend.tls.certresolver=letsencrypt' + - 'traefik.http.services.cairo-coder-backend.loadbalancer.server.port=3001' + + cairo-coder-ingester: + image: ghcr.io/kasarlabs/cairo-coder/ingester:sha-9822262 + container_name: cairo-coder-ingester + profiles: ['ingester'] + volumes: + - ./config/config.toml:/app/packages/agents/config.toml + depends_on: + cairo-coder-postgres: + condition: service_healthy + cairo-coder-backend: + condition: service_started + restart: 'no' + networks: + - cairo_coder + +networks: + cairo_coder: + driver: bridge + services: + external: true + +volumes: + postgres_data: diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 03ac6b06..00000000 --- a/docker-compose.yml +++ /dev/null @@ -1,51 +0,0 @@ -version: '3.8' - -services: - postgres: - image: pgvector/pgvector:pg17 - container_name: "postgresql" - shm_size: 1g - env_file: - - .env - ports: - - 5432:5432 - volumes: - - ./data:/var/lib/postgresql/data - restart: unless-stopped - networks: - - cairo_coder - - backend: - build: - context: . - dockerfile: backend.dockerfile - ports: - - 3001:3001 - extra_hosts: - - host.docker.internal:host-gateway - env_file: - - packages/backend/.env - depends_on: - postgres: - condition: service_started - restart: unless-stopped - networks: - - cairo_coder - - ingester: - build: - context: . 
- dockerfile: ingest.dockerfile - profiles: ["ingester"] - depends_on: - postgres: - condition: service_started - networks: - - cairo_coder - -networks: - cairo_coder: - -volumes: - postgres_data: - diff --git a/ingest.dockerfile b/ingester.dockerfile similarity index 93% rename from ingest.dockerfile rename to ingester.dockerfile index d90b2dee..6e47c202 100644 --- a/ingest.dockerfile +++ b/ingester.dockerfile @@ -21,6 +21,8 @@ RUN pnpm install --frozen-lockfile RUN npm install -g turbo # Install Antora +RUN apt update && apt install -y ca-certificates openssl + RUN npm install -g @antora/cli @antora/site-generator # Install mdbook diff --git a/package.json b/package.json index 149463c4..43ed52d7 100644 --- a/package.json +++ b/package.json @@ -7,6 +7,10 @@ "dev": "turbo run dev", "lint": "turbo run lint", "start": "turbo run start", + "test": "turbo run test", + "doc-quality": "turbo run doc-quality", + "test:unit": "turbo run test:unit", + "test:code-quality": "turbo run test:code-quality", "generate-embeddings": "turbo run generate-embeddings", "generate-embeddings:yes": "turbo run generate-embeddings:yes", "clean": "find packages -type d -name 'dist' -exec rm -rf {} +; find packages -type d -name '.turbo' -exec rm -rf {} +", diff --git a/packages/agents/__tests__/code-quality/snak.test.ts b/packages/agents/__tests__/code-quality/snak.test.ts new file mode 100644 index 00000000..d1440886 --- /dev/null +++ b/packages/agents/__tests__/code-quality/snak.test.ts @@ -0,0 +1,317 @@ +import request from 'supertest'; +require('dotenv').config({ path: '.env.test' }); + +if (!process.env.API_KEY) { + throw new Error('API_KEY not found in .env.test file'); +} +if (!process.env.API_URL) { + throw new Error('API_URL not found in .env.test file'); +} + +const API_KEY = process.env.API_KEY; +const API_URL = process.env.API_URL; + +// The agent is defined at the global level so it can be used both in beforeAll and in the tests +const agent = request(API_URL); + +// beforeAll is placed at the global level, outside the describe block +beforeAll(async () => { + console.log('Setting up test environment - Installing Scarb...'); + + try { + const installResponse = await agent + .post('/api/key/request') + .set('Content-Type', 'application/json') + .set('x-api-key', API_KEY) + .send({ + request: 'Can you install scarb?', + }); + + console.log('Scarb Installation Status:', installResponse.status); + console.log( + 'Scarb Installation Response:', + installResponse.body.output + ? JSON.stringify(installResponse.body.output[0], null, 2) + : 'No output', + ); + + const isSuccess = + installResponse.status === 201 && + installResponse.body.output && + installResponse.body.output[0].status === 'success'; + + if (!isSuccess) { + console.error( + '⚠️ Warning: Scarb installation failed.
: ', + installResponse.body.output[0].text, + ); + } else { + console.log('✅ Scarb installation successful'); + } + + // Wait for the installation to be processed + await new Promise((resolve) => setTimeout(resolve, 5000)); + } catch (error) { + console.error('❌ Error during Scarb installation:', error); + console.warn('⚠️ Tests may fail if Scarb is not properly installed'); + } +}, 60000); // 60-second timeout for the installation + +describe('Code Generation and Compilation Tests', () => { + async function generateAndCompile( + project_name: string, + prompt_content: string, + index: number, + ): Promise<{ success: boolean; error?: string }> { + console.log(`\n=== Test #${index}: ${project_name} ===`); + console.log(`Generating code for: ${prompt_content}`); + + try { + const generation_prompt = `Test #${index}: Generate Cairo code for ${prompt_content} + + 1. First, register a new project named "${project_name}" using the cairocoder_register_project tool + 2. Then, generate the Cairo code using the cairocoder_generate_code tool + + If generation is successful: + - Return the generated Cairo code with syntax highlighting + + If generation fails: + - Return only the error message from the tool + - Do not try to fix or retry the generation + + Do not perform any additional actions.`; + const generateResponse = await agent + .post('/api/key/request') + .set('Content-Type', 'application/json') + .set('x-api-key', API_KEY) + .send({ + request: generation_prompt, + }); + + console.log('CODE GENERATION STATUS:', generateResponse.status); + + if (generateResponse.status !== 201) { + return { + success: false, + error: `Generation HTTP request failed with status ${generateResponse.status}: ${JSON.stringify(generateResponse.body)}`, + }; + } + + console.log( + 'CODE GENERATION RESPONSE:', + JSON.stringify(generateResponse.body.output[0], null, 2), + ); + const successfulGeneration = generateResponse.body.output[0].text + .toLowerCase() + .includes('```cairo'); + + if ( + generateResponse.body.output[0].status !== 'success' || + !successfulGeneration + ) { + return { + success: false, + error: `Generation failed: ${JSON.stringify(generateResponse.body.output[0].text)}`, + }; + } + + console.log('✅ Code generated successfully'); + + const compilation_prompt = `Test #${index}: Compile the project "${project_name}" using the scarb_compile_contract tool. + + After compilation, report whether it succeeded or failed. + + For successful compilation: Report "Compilation successful" and include any relevant output. + For failed compilation: Report "Compilation failed" and include the specific error messages. + + Only use the compilation tool and no other tools.
+ If another tool is used, instead of or in addition to the compilation tool, report it as a failure.`; + + const compileResponse = await agent + .post('/api/key/request') + .set('Content-Type', 'application/json') + .set('x-api-key', API_KEY) + .send({ + request: compilation_prompt, + }); + + console.log('COMPILATION STATUS:', compileResponse.status); + + if (compileResponse.status !== 201) { + return { + success: false, + error: `Compilation HTTP request failed with status ${compileResponse.status}: ${JSON.stringify(compileResponse.body)}`, + }; + } + + console.log( + 'COMPILATION RESPONSE:', + JSON.stringify(compileResponse.body.output[0], null, 2), + ); + + const successfulCompilation = + compileResponse.body.output[0].text + .toLowerCase() + .includes('compilation') && + !compileResponse.body.output[0].text + .toLowerCase() + .includes('failure') && + !compileResponse.body.output[0].text.toLowerCase().includes('failed') && + !compileResponse.body.output[0].text.toLowerCase().includes('error'); + + if ( + compileResponse.body.output[0].status !== 'success' || + !successfulCompilation + ) { + return { + success: false, + error: `Compilation failed: ${JSON.stringify(compileResponse.body.output[0].text)}`, + }; + } + + console.log('✅ Compilation successful'); + await new Promise((resolve) => setTimeout(resolve, 5000)); + + return { success: true }; + } catch (error) { + console.error(`❌ Unexpected error in Test #${index}:`, error); + return { + success: false, + error: `Unexpected error: ${error.message}`, + }; + } + } + + describe('Cairo Functions and Basic Algorithms', () => { + test('Hello World test', async () => { + const project_name = 'hello_world'; + const prompt_content = 'a cairo function that returns "Hello World"'; + const result = await generateAndCompile(project_name, prompt_content, 0); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('Fibonacci function', async () => { + const project_name = 'fibonacci'; + const prompt_content = + 'a Cairo function that calculates the Fibonacci sequence'; + const result = await generateAndCompile(project_name, prompt_content, 1); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('Max value in array', async () => { + const project_name = 'max_value'; + const prompt_content = + 'a Cairo function that finds the maximum value in an array'; + const result = await generateAndCompile(project_name, prompt_content, 2); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('Simple sorting algorithm', async () => { + const project_name = 'sorting'; + const prompt_content = 'a sorting algorithm'; + const result = await generateAndCompile(project_name, prompt_content, 3); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + }); + + describe('Simple Starknet Contracts', () => { + test('Basic contract with storage', async () => { + const project_name = 'basic_contract'; + const prompt_content = + 'a basic Starknet contract with a storage variable and getter/setter functions'; + const result = await generateAndCompile(project_name, prompt_content, 4); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('Counter
contract', async () => { + const project_name = 'counter'; + const prompt_content = + 'a Starknet contract that maintains a counter with increment and decrement functions'; + const result = await generateAndCompile(project_name, prompt_content, 5); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('Simple voting system', async () => { + const project_name = 'voting'; + const prompt_content = + 'a Starknet contract for a simple voting system where users can vote only once'; + const result = await generateAndCompile(project_name, prompt_content, 6); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + }); + + describe('Standard and Complex Contracts', () => { + test('ERC-20 token contract', async () => { + const project_name = 'erc20'; + const prompt_content = 'a minimal Starknet ERC-20 token contract'; + const result = await generateAndCompile(project_name, prompt_content, 7); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('ERC-721 NFT contract', async () => { + const project_name = 'erc721'; + const prompt_content = + 'a Starknet ERC-721 NFT contract with minting functionality'; + const result = await generateAndCompile(project_name, prompt_content, 8); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + + test('Multisig wallet contract', async () => { + const project_name = 'multisig'; + const prompt_content = + 'a Starknet multisig wallet contract that requires multiple approvals for transactions'; + const result = await generateAndCompile(project_name, prompt_content, 9); + + if (!result.success) { + console.error(`❌ TEST FAILED: ${result.error}`); + } + + expect(result.success).toBe(true); + }, 100000); + }); +}); diff --git a/packages/agents/__tests__/answerGenerator.test.ts b/packages/agents/__tests__/unit/answerGenerator.test.ts similarity index 97% rename from packages/agents/__tests__/answerGenerator.test.ts rename to packages/agents/__tests__/unit/answerGenerator.test.ts index 8a758475..6db476be 100644 --- a/packages/agents/__tests__/answerGenerator.test.ts +++ b/packages/agents/__tests__/unit/answerGenerator.test.ts @@ -1,4 +1,4 @@ -import { AnswerGenerator } from '../src/core/pipeline/answerGenerator'; +import { AnswerGenerator } from '../../src/core/pipeline/answerGenerator'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { RagInput, @@ -6,23 +6,24 @@ import { RetrievedDocuments, RagSearchConfig, DocumentSource, -} from '../src/types/index'; +} from '../../src/types/index'; import { Document } from '@langchain/core/documents'; import { mockDeep, MockProxy } from 'jest-mock-extended'; import { IterableReadableStream } from '@langchain/core/utils/stream'; import { BaseMessage, BaseMessageChunk } from '@langchain/core/messages'; import { BaseLanguageModelInput } from '@langchain/core/language_models/base'; - // Mock the formatChatHistoryAsString utility -jest.mock('../src/utils/index', () => ({ +jest.mock('../../src/utils/index', () => ({ __esModule: true, - formatChatHistoryAsString: jest.fn().mockImplementation(() => 'mocked chat history'), + formatChatHistoryAsString: jest + .fn() + .mockImplementation(() => 'mocked chat history'), logger: { info: jest.fn(), debug: jest.fn(), error: 
jest.fn(), - } + }, })); // No need to separately mock the logger since it's now mocked as part of utils/index diff --git a/packages/agents/__tests__/documentRetriever.test.ts b/packages/agents/__tests__/unit/documentRetriever.test.ts similarity index 96% rename from packages/agents/__tests__/documentRetriever.test.ts rename to packages/agents/__tests__/unit/documentRetriever.test.ts index e7af11e9..74007efb 100644 --- a/packages/agents/__tests__/documentRetriever.test.ts +++ b/packages/agents/__tests__/unit/documentRetriever.test.ts @@ -1,22 +1,22 @@ -import { DocumentRetriever } from '../src/core/pipeline/documentRetriever'; +import { DocumentRetriever } from '../../src/core/pipeline/documentRetriever'; import { Embeddings } from '@langchain/core/embeddings'; import { DocumentSource, ProcessedQuery, RagSearchConfig, -} from '../src/types/index'; +} from '../../src/types/index'; import { Document } from '@langchain/core/documents'; import { mockDeep, MockProxy } from 'jest-mock-extended'; // Mock all utils including computeSimilarity and logger -jest.mock('../src/utils/index', () => ({ +jest.mock('../../src/utils/index', () => ({ __esModule: true, computeSimilarity: jest.fn().mockImplementation(() => 0.75), // Default high similarity logger: { info: jest.fn(), debug: jest.fn(), error: jest.fn(), - } + }, })); describe('DocumentRetriever', () => { @@ -158,7 +158,7 @@ describe('DocumentRetriever', () => { // Import the real computeSimilarity function to control scores const computeSimilarityMock = jest.requireMock( - '../src/utils/index', + '../../src/utils/index', ).computeSimilarity; // Set up different similarity scores for different documents diff --git a/packages/agents/__tests__/queryProcessor.test.ts b/packages/agents/__tests__/unit/queryProcessor.test.ts similarity index 94% rename from packages/agents/__tests__/queryProcessor.test.ts rename to packages/agents/__tests__/unit/queryProcessor.test.ts index 6c007041..9eec2c1a 100644 --- a/packages/agents/__tests__/queryProcessor.test.ts +++ b/packages/agents/__tests__/unit/queryProcessor.test.ts @@ -1,22 +1,24 @@ -import { QueryProcessor } from '../src/core/pipeline/queryProcessor'; +import { QueryProcessor } from '../../src/core/pipeline/queryProcessor'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { RagInput, RagSearchConfig, DocumentSource, -} from '../src/types/index'; +} from '../../src/types/index'; import { mockDeep, MockProxy } from 'jest-mock-extended'; import { AIMessage } from '@langchain/core/messages'; // Mock the logger -jest.mock('../src/utils/index', () => ({ +jest.mock('../../src/utils/index', () => ({ logger: { info: jest.fn(), debug: jest.fn(), error: jest.fn(), }, - formatChatHistoryAsString: jest.fn((history) => - history.map((message) => `${message._getType()}: ${message.content}`).join('\n') + formatChatHistoryAsString: jest.fn((history) => + history + .map((message) => `${message._getType()}: ${message.content}`) + .join('\n'), ), parseXMLContent: jest.fn((xml, tag) => { const regex = new RegExp(`<${tag}>(.*?)</${tag}>`, 'gs'); diff --git a/packages/agents/__tests__/ragAgentFactory.test.ts b/packages/agents/__tests__/unit/ragAgentFactory.test.ts similarity index 89% rename from packages/agents/__tests__/ragAgentFactory.test.ts rename to packages/agents/__tests__/unit/ragAgentFactory.test.ts index 60725fa8..60f3a554 100644 --- a/packages/agents/__tests__/ragAgentFactory.test.ts +++ b/packages/agents/__tests__/unit/ragAgentFactory.test.ts @@ -1,15 +1,15 @@ -import { RagAgentFactory }
from '../src/core/agentFactory'; -import { RagPipeline } from '../src/core/pipeline/ragPipeline'; -import { AvailableAgents, LLMConfig, DocumentSource } from '../src/types'; +import { RagAgentFactory } from '../../src/core/agentFactory'; +import { RagPipeline } from '../../src/core/pipeline/ragPipeline'; +import { AvailableAgents, LLMConfig, DocumentSource } from '../../src/types'; import { Embeddings } from '@langchain/core/embeddings'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; -import { VectorStore } from '../src/db/vectorStore'; +import { VectorStore } from '../../src/db/postgresVectorStore'; import { mockDeep, MockProxy } from 'jest-mock-extended'; import { BaseMessage } from '@langchain/core/messages'; import EventEmitter from 'events'; // Mock the agent configuration and RagPipeline -jest.mock('../src/config/agent', () => ({ +jest.mock('../../src/config/agent', () => ({ getAgentConfig: jest.fn().mockImplementation(() => ({ name: 'Cairo Coder', prompts: { @@ -27,7 +27,7 @@ jest.mock('../src/config/agent', () => ({ })), })); -jest.mock('../src/core/pipeline/ragPipeline', () => ({ +jest.mock('../../src/core/pipeline/ragPipeline', () => ({ RagPipeline: jest.fn().mockImplementation(() => ({ execute: jest.fn().mockReturnValue(new EventEmitter()), })), @@ -103,7 +103,7 @@ describe('RagAgentFactory', () => { // Assert expect(RagPipeline).toHaveBeenCalledTimes(1); expect(emitter).toBeInstanceOf(EventEmitter); - + // Check streaming option is passed const executeSpy = (RagPipeline as jest.Mock).mock.results[0].value .execute; diff --git a/packages/agents/__tests__/ragPipeline.test.ts b/packages/agents/__tests__/unit/ragPipeline.test.ts similarity index 90% rename from packages/agents/__tests__/ragPipeline.test.ts rename to packages/agents/__tests__/unit/ragPipeline.test.ts index 20e23b1f..35a0b012 100644 --- a/packages/agents/__tests__/ragPipeline.test.ts +++ b/packages/agents/__tests__/unit/ragPipeline.test.ts @@ -1,7 +1,7 @@ -import { RagPipeline } from '../src/core/pipeline/ragPipeline'; -import { QueryProcessor } from '../src/core/pipeline/queryProcessor'; -import { DocumentRetriever } from '../src/core/pipeline/documentRetriever'; -import { AnswerGenerator } from '../src/core/pipeline/answerGenerator'; +import { RagPipeline } from '../../src/core/pipeline/ragPipeline'; +import { QueryProcessor } from '../../src/core/pipeline/queryProcessor'; +import { DocumentRetriever } from '../../src/core/pipeline/documentRetriever'; +import { AnswerGenerator } from '../../src/core/pipeline/answerGenerator'; import { Embeddings } from '@langchain/core/embeddings'; import { BookChunk, @@ -9,7 +9,7 @@ import { RagInput, RagSearchConfig, RetrievedDocuments, -} from '../src/types/index'; +} from '../../src/types/index'; import { Document } from '@langchain/core/documents'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { IterableReadableStream } from '@langchain/core/utils/stream'; @@ -18,17 +18,17 @@ import { mockDeep, MockProxy } from 'jest-mock-extended'; import EventEmitter from 'events'; // Mock the dependencies at the module level -jest.mock('../src/core/pipeline/queryProcessor'); -jest.mock('../src/core/pipeline/documentRetriever'); -jest.mock('../src/core/pipeline/answerGenerator'); +jest.mock('../../src/core/pipeline/queryProcessor'); +jest.mock('../../src/core/pipeline/documentRetriever'); +jest.mock('../../src/core/pipeline/answerGenerator'); // Mock the utils including logger -jest.mock('../src/utils/index', () => ({ 
+jest.mock('../../src/utils/index', () => ({ logger: { info: jest.fn(), debug: jest.fn(), error: jest.fn(), - } + }, })); describe('RagPipeline', () => { diff --git a/packages/agents/package.json b/packages/agents/package.json index 156ec76f..db1fd45e 100644 --- a/packages/agents/package.json +++ b/packages/agents/package.json @@ -5,7 +5,9 @@ "scripts": { "build": "tsc -p tsconfig.json", "test": "jest", - "test-doc-quality": "ts-node src/testDocQuality.ts" + "doc-quality": "ts-node src/doc-quality/testDocQuality.ts", + "test:unit": "jest --config jest.config.js --testMatch=\"**/__tests__/unit/**/*.test.[jt]s?(x)\"", + "test:code-quality": "jest --config jest.config.js --testMatch=\"**/__tests__/code-quality/**/*.test.[jt]s?(x)\"" }, "dependencies": { "@iarna/toml": "^2.2.5", diff --git a/packages/agents/src/core/pipeline/answerGenerator.ts b/packages/agents/src/core/pipeline/answerGenerator.ts index 586f6188..6effd036 100644 --- a/packages/agents/src/core/pipeline/answerGenerator.ts +++ b/packages/agents/src/core/pipeline/answerGenerator.ts @@ -28,8 +28,11 @@ export class AnswerGenerator { logger.debug('Final Prompt:' + prompt); // Use stream instead of invoke, and pipe through StringOutputParser + logger.debug('Before streaming response'); + const startTime = Date.now(); const stream = await this.llm.stream(prompt); logger.debug('Started streaming response'); + logger.debug(`Time to stream: ${Date.now() - startTime}ms`); return stream; } diff --git a/packages/agents/src/core/pipeline/ragPipeline.ts b/packages/agents/src/core/pipeline/ragPipeline.ts index 6ef6316d..4b4172a2 100644 --- a/packages/agents/src/core/pipeline/ragPipeline.ts +++ b/packages/agents/src/core/pipeline/ragPipeline.ts @@ -1,5 +1,10 @@ import { Embeddings } from '@langchain/core/embeddings'; -import { RagInput, StreamHandler, RagSearchConfig, LLMConfig } from '../../types'; +import { + RagInput, + StreamHandler, + RagSearchConfig, + LLMConfig, +} from '../../types'; import { QueryProcessor } from './queryProcessor'; import { DocumentRetriever } from './documentRetriever'; import { AnswerGenerator } from './answerGenerator'; @@ -61,10 +66,13 @@ export class RagPipeline { // Step 3: Generate the answer as a stream const stream = await this.answerGenerator.generate(input, retrieved); + const startTime = Date.now(); for await (const chunk of stream) { handler.emitResponse(chunk); + logger.debug(`Time to get chunk: ${Date.now() - startTime}ms`); } logger.debug('Stream ended'); + logger.debug(`Total time: ${Date.now() - startTime}ms`); handler.emitEnd(); } catch (error) { logger.error('Pipeline error:', error); diff --git a/packages/agents/src/tests/samples/all_quality.json b/packages/agents/src/doc-quality/samples/all_quality.json similarity index 100% rename from packages/agents/src/tests/samples/all_quality.json rename to packages/agents/src/doc-quality/samples/all_quality.json diff --git a/packages/agents/src/tests/samples/cairo_book_quality.json b/packages/agents/src/doc-quality/samples/cairo_book_quality.json similarity index 100% rename from packages/agents/src/tests/samples/cairo_book_quality.json rename to packages/agents/src/doc-quality/samples/cairo_book_quality.json diff --git a/packages/agents/src/tests/samples/openzeppelin_quality.json b/packages/agents/src/doc-quality/samples/openzeppelin_quality.json similarity index 100% rename from packages/agents/src/tests/samples/openzeppelin_quality.json rename to packages/agents/src/doc-quality/samples/openzeppelin_quality.json diff --git 
a/packages/agents/src/tests/samples/starknet_docs_quality.json b/packages/agents/src/doc-quality/samples/starknet_docs_quality.json similarity index 100% rename from packages/agents/src/tests/samples/starknet_docs_quality.json rename to packages/agents/src/doc-quality/samples/starknet_docs_quality.json diff --git a/packages/agents/src/doc-quality/testDocQuality.ts b/packages/agents/src/doc-quality/testDocQuality.ts new file mode 100644 index 00000000..2048f3b6 --- /dev/null +++ b/packages/agents/src/doc-quality/testDocQuality.ts @@ -0,0 +1,293 @@ +import fs from 'fs'; +import path from 'path'; +import { Command } from 'commander'; +import { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { OpenAIEmbeddings } from '@langchain/openai'; +import { DocumentSource, RagSearchConfig } from '../types'; +import { DocQualityTester } from '../core/pipeline/docQualityTester'; +import { getAgentConfig } from '../config/agent'; +import { logger } from '../utils'; +import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; +import { VectorStore } from '../db/postgresVectorStore'; +import { + getGeminiApiKey, + getOpenaiApiKey, + getVectorDbConfig, +} from '../config/settings'; +import { Embeddings } from '@langchain/core/embeddings'; +import { LLMConfig } from '../types'; + +const program = new Command(); + +// Initialize the program +program + .name('test-doc-quality') + .description('Test documentation quality using the Starknet Agent') + .version('1.0.0'); + +program + .command('test') + .description('Run documentation quality tests') + .requiredOption( + '-s, --source <source>', + 'Documentation source to test (e.g., starknet_docs)', + ) + .requiredOption('-t, --test-file <path>', 'Path to test file (JSON)') + .option('-o, --output <path>', 'Path to output file (JSON)') + .option( + '-m, --model <model>', + 'LLM model to use for testing', + 'Claude 3.5 Sonnet', + ) + .option( + '-e, --eval-model <model>', + 'LLM model to use for evaluation (defaults to same as model)', + ) + .option( + '--no-detailed-output', + 'Disable detailed test output with PASS/FAIL status', + ) + .option( + '--thresholds <json>', + 'Custom thresholds for determining pass/fail status (JSON string)', + ) + .action(async (options) => { + try { + // Validate source + const source = options.source as DocumentSource; + const focus = source as string; + + // Load test file + const testFilePath = path.resolve(process.cwd(), options.testFile); + if (!fs.existsSync(testFilePath)) { + logger.error(`Test file not found: ${testFilePath}`); + process.exit(1); + } + + const testFileContent = fs.readFileSync(testFilePath, 'utf-8'); + const testSet = JSON.parse(testFileContent); + + const geminiApiKey = getGeminiApiKey(); + const openaiApiKey = getOpenaiApiKey(); + + // Initialize models and embeddings + const defaultLLM = new ChatGoogleGenerativeAI({ + temperature: 0.7, + apiKey: geminiApiKey, + modelName: 'gemini-2.0-flash', + }); + + const llmConfig: LLMConfig = { + defaultLLM: defaultLLM as unknown as BaseChatModel, + fastLLM: defaultLLM as unknown as BaseChatModel, + }; + + const embeddings = new OpenAIEmbeddings({ + openAIApiKey: openaiApiKey, + modelName: 'text-embedding-3-large', + dimensions: 1536, + }) as unknown as Embeddings; + + // Initialize vector store + const dbConfig = getVectorDbConfig(); + const vectorStore = await VectorStore.getInstance(dbConfig, embeddings); + + // Get agent configuration + const agentConfig = getAgentConfig(vectorStore); + if (!agentConfig) { + logger.error(`Agent configuration not found for source:
${source}`); + process.exit(1); + } + + // Create RAG config + const ragConfig: RagSearchConfig = { + ...agentConfig, + vectorStore, + sources: agentConfig.sources, + }; + + // Initialize DocQualityTester + const tester = new DocQualityTester(llmConfig, embeddings, ragConfig); + + // Parse thresholds if provided + let thresholds = undefined; + if (options.thresholds) { + try { + thresholds = JSON.parse(options.thresholds); + } catch (e) { + logger.error(`Error parsing thresholds JSON: ${e}`); + process.exit(1); + } + } + + // Run tests with detailed output options + logger.info(`Starting documentation quality tests for focus ${focus}`); + const results = await tester.testDocQuality(testSet, focus, { + showDetailedOutput: options.detailedOutput, + thresholds, + }); + + // Generate report + const report = await tester.generateReport(results); + + // Output results + if (options.output) { + const outputPath = path.resolve(process.cwd(), options.output); + fs.writeFileSync(outputPath, JSON.stringify(report, null, 2)); + logger.info(`Report saved to ${outputPath}`); + } else { + console.log('\nDocumentation Quality Report'); + console.log('===========================\n'); + console.log(`Focus: ${report.results.focus}`); + console.log(`Version: ${report.results.version}`); + console.log(`Test Cases: ${report.results.caseResults.length}`); + console.log('\nSummary:'); + console.log(report.summary); + + console.log('\nKey Metrics:'); + console.log( + `- Relevance Score: ${report.results.metrics.overall.percentAnswered.toFixed(2)}`, + ); + console.log( + `- Coverage Score: ${report.results.metrics.overall.avgClarityScore.toFixed(2)}`, + ); + console.log( + `- Answer Completeness: ${report.results.metrics.overall.avgSourceAlignment.toFixed(2)}`, + ); + + console.log('\nTop Recommendations:'); + const highPriorityRecs = report.recommendations.filter( + (r) => r.priority === 'high', + ); + highPriorityRecs.forEach((rec, i) => { + console.log(`${i + 1}. 
${rec.description}`); + }); + + console.log( + '\nFor full report, use the --output option to save to file.', + ); + } + } catch (error) { + logger.error('Error running documentation quality tests:', error); + process.exit(1); + } + }); + +program + .command('compare') + .description('Compare documentation quality between versions') + .requiredOption('-s, --source <source>', 'Documentation source to test') + .requiredOption( + '-b, --baseline <path>', + 'Path to baseline results file (JSON)', + ) + .requiredOption('-c, --current <path>', 'Path to current results file (JSON)') + .option('-o, --output <path>', 'Path to output file (JSON)') + .option( + '-m, --model <model>', + 'LLM model to use for comparison', + 'Claude 3.5 Sonnet', + ) + .action(async (options) => { + try { + // Validate source + const focus = options.source as DocumentSource; + + // Load result files + const baselinePath = path.resolve(process.cwd(), options.baseline); + const currentPath = path.resolve(process.cwd(), options.current); + + if (!fs.existsSync(baselinePath)) { + logger.error(`Baseline file not found: ${baselinePath}`); + process.exit(1); + } + + if (!fs.existsSync(currentPath)) { + logger.error(`Current file not found: ${currentPath}`); + process.exit(1); + } + + const baselineContent = fs.readFileSync(baselinePath, 'utf-8'); + const currentContent = fs.readFileSync(currentPath, 'utf-8'); + + const baseline = JSON.parse(baselineContent); + const current = JSON.parse(currentContent); + + const geminiApiKey = getGeminiApiKey(); + const openaiApiKey = getOpenaiApiKey(); + + // Initialize models and embeddings + const defaultLLM = new ChatGoogleGenerativeAI({ + temperature: 0.7, + apiKey: geminiApiKey, + modelName: 'gemini-2.0-flash', + }); + + const llmConfig: LLMConfig = { + defaultLLM: defaultLLM as unknown as BaseChatModel, + fastLLM: defaultLLM as unknown as BaseChatModel, + }; + + const embeddings = new OpenAIEmbeddings({ + openAIApiKey: openaiApiKey, + modelName: 'text-embedding-3-large', + dimensions: 1536, + }) as unknown as Embeddings; + + // Initialize vector store + const dbConfig = getVectorDbConfig(); + const vectorStore = await VectorStore.getInstance(dbConfig, embeddings); + + // Get agent configuration + const agentConfig = getAgentConfig(vectorStore); + if (!agentConfig) { + logger.error(`Agent configuration not found for source: ${focus}`); + process.exit(1); + } + + // Create RAG config + const ragConfig: RagSearchConfig = { + ...agentConfig, + vectorStore, + sources: agentConfig.sources, + }; + + // Initialize DocQualityTester + const tester = new DocQualityTester(llmConfig, embeddings, ragConfig); + + // Run comparison + const comparisonReport = await tester.compareResults(baseline, current); + + // Output results + if (options.output) { + const outputPath = path.resolve(process.cwd(), options.output); + fs.writeFileSync(outputPath, JSON.stringify(comparisonReport, null, 2)); + logger.info(`Comparison report saved to ${outputPath}`); + } else { + console.log('\nDocumentation Quality Comparison'); + console.log('==============================\n'); + console.log(`Focus: ${current.focus}`); + console.log(`Baseline Version: ${baseline.version}`); + console.log(`Current Version: ${current.version}`); + console.log('\nComparison Summary:'); + console.log(comparisonReport.summary); + + // Display recommendations + console.log('\nRecommendations:'); + comparisonReport.recommendations.forEach((rec, i) => { + console.log( + `${i + 1}.
[${rec.priority.toUpperCase()}] ${rec.description}`, + ); + }); + + console.log( + '\nFor full report, use the --output option to save to file.', + ); + } + } catch (error) { + logger.error('Error comparing documentation quality:', error); + process.exit(1); + } + }); + +program.parse(process.argv); diff --git a/packages/agents/src/tests/testDocQuality.ts b/packages/agents/src/tests/testDocQuality.ts deleted file mode 100644 index 0e08d93e..00000000 --- a/packages/agents/src/tests/testDocQuality.ts +++ /dev/null @@ -1,339 +0,0 @@ -// import fs from 'fs'; -// import path from 'path'; -// import { Command } from 'commander'; -// import { BaseChatModel } from '@langchain/core/language_models/chat_models'; -// import { OpenAIEmbeddings } from '@langchain/openai'; -// import { DocumentSource, RagSearchConfig } from './core/types'; -// import { DocQualityTester } from './pipeline/docQualityTester'; -// import { -// AvailableAgents, -// getAgentConfig, -// LLMConfig, -// } from './config/agentConfigs'; -// import logger from './utils/logger'; -// import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; -// import { VectorStore } from './db/vectorStore'; -// import { getGeminiApiKey, getOpenaiApiKey, getVectorDbConfig } from './config'; -// import { Embeddings } from '@langchain/core/embeddings'; - -// const program = new Command(); - -// // Initialize the program -// program -// .name('test-doc-quality') -// .description('Test documentation quality using the Starknet Agent') -// .version('1.0.0'); - -// program -// .command('test') -// .description('Run documentation quality tests') -// .requiredOption( -// '-s, --source <source>', -// 'Documentation source to test (e.g., starknet_docs)', -// ) -// .requiredOption('-t, --test-file <path>', 'Path to test file (JSON)') -// .option('-o, --output <path>', 'Path to output file (JSON)') -// .option( -// '-m, --model <model>', -// 'LLM model to use for testing', -// 'Claude 3.5 Sonnet', -// ) -// .option( -// '-e, --eval-model <model>', -// 'LLM model to use for evaluation (defaults to same as model)', -// ) -// .option( -// '--no-detailed-output', -// 'Disable detailed test output with PASS/FAIL status', -// ) -// .option( -// '--thresholds <json>', -// 'Custom thresholds for determining pass/fail status (JSON string)', -// ) -// .action(async (options) => { -// try { -// // Validate source -// const source = options.source as DocumentSource; -// const focus = source as string; - -// // Load test file -// const testFilePath = path.resolve(process.cwd(), options.testFile); -// if (!fs.existsSync(testFilePath)) { -// logger.error(`Test file not found: ${testFilePath}`); -// process.exit(1); -// } - -// const testFileContent = fs.readFileSync(testFilePath, 'utf-8'); -// const testSet = JSON.parse(testFileContent); - -// const geminiApiKey = getGeminiApiKey(); -// const openaiApiKey = getOpenaiApiKey(); - -// // Initialize models and embeddings -// const defaultLLM = new ChatGoogleGenerativeAI({ -// temperature: 0.7, -// apiKey: geminiApiKey, -// modelName: 'gemini-2.0-flash', -// }); - -// const llmConfig = { -// defaultLLM: defaultLLM as unknown as BaseChatModel, -// fastLLM: defaultLLM as unknown as BaseChatModel, -// evaluationLLM: defaultLLM as unknown as BaseChatModel, -// }; - -// const embeddings = new OpenAIEmbeddings({ -// openAIApiKey: openaiApiKey, -// modelName: 'text-embedding-3-large', -// dimensions: 2048, -// }) as unknown as Embeddings; - -// // Initialize vector store -// const dbConfig = getVectorDbConfig(); -// const vectorStore = await
VectorStore.getInstance(dbConfig, embeddings); - -// const source_to_agent_name: Record< -// DocumentSource | 'starknet_ecosystem', -// AvailableAgents -// > = { -// [DocumentSource.CAIRO_BOOK]: 'cairoBook', -// [DocumentSource.STARKNET_DOCS]: 'starknetDocs', -// starknet_ecosystem: 'starknetEcosystem', -// [DocumentSource.STARKNET_FOUNDRY]: 'starknetFoundry', -// [DocumentSource.CAIRO_BY_EXAMPLE]: 'cairoByExample', -// [DocumentSource.OPENZEPPELIN_DOCS]: 'openZeppelinDocs', -// }; -// // Get agent configuration -// const agentConfig = getAgentConfig( -// source_to_agent_name[source], -// vectorStore, -// ); -// if (!agentConfig) { -// logger.error(`Agent configuration not found for source: ${source}`); -// process.exit(1); -// } - -// // Create RAG config -// const ragConfig: RagSearchConfig = { -// ...agentConfig, -// vectorStore, -// sources: agentConfig.sources, -// }; - -// // Initialize DocQualityTester -// const tester = new DocQualityTester(llmConfig, embeddings, ragConfig); - -// // Parse thresholds if provided -// let thresholds = undefined; -// if (options.thresholds) { -// try { -// thresholds = JSON.parse(options.thresholds); -// } catch (e) { -// logger.error(`Error parsing thresholds JSON: ${e}`); -// process.exit(1); -// } -// } - -// // Run tests with detailed output options -// logger.info(`Starting documentation quality tests for focus ${focus}`); -// const results = await tester.testDocQuality(testSet, focus, { -// showDetailedOutput: options.detailedOutput, -// thresholds, -// }); - -// // Generate report -// const report = await tester.generateReport(results); - -// // Output results -// if (options.output) { -// const outputPath = path.resolve(process.cwd(), options.output); -// fs.writeFileSync(outputPath, JSON.stringify(report, null, 2)); -// logger.info(`Report saved to ${outputPath}`); -// } else { -// console.log('\nDocumentation Quality Report'); -// console.log('===========================\n'); -// console.log(`Focus: ${report.results.focus}`); -// console.log(`Version: ${report.results.version}`); -// console.log(`Test Cases: ${report.results.caseResults.length}`); -// console.log('\nSummary:'); -// console.log(report.summary); - -// console.log('\nKey Metrics:'); -// console.log( -// `- Relevance Score: ${report.results.metrics.overall.percentAnswered.toFixed(2)}`, -// ); -// console.log( -// `- Coverage Score: ${report.results.metrics.overall.avgClarityScore.toFixed(2)}`, -// ); -// console.log( -// `- Answer Completeness: ${report.results.metrics.overall.avgSourceAlignment.toFixed(2)}`, -// ); - -// console.log('\nTop Recommendations:'); -// const highPriorityRecs = report.recommendations.filter( -// (r) => r.priority === 'high', -// ); -// highPriorityRecs.forEach((rec, i) => { -// console.log(`${i + 1}. 
${rec.description}`); -// }); - -// console.log( -// '\nFor full report, use the --output option to save to file.', -// ); -// } -// } catch (error) { -// logger.error('Error running documentation quality tests:', error); -// process.exit(1); -// } -// }); - -// program -// .command('compare') -// .description('Compare documentation quality between versions') -// .requiredOption('-s, --source ', 'Documentation source to test') -// .requiredOption( -// '-b, --baseline ', -// 'Path to baseline results file (JSON)', -// ) -// .requiredOption('-c, --current ', 'Path to current results file (JSON)') -// .option('-o, --output ', 'Path to output file (JSON)') -// .option( -// '-m, --model ', -// 'LLM model to use for comparison', -// 'Claude 3.5 Sonnet', -// ) -// .action(async (options) => { -// try { -// // Validate source -// const focus = options.source as DocumentSource; - -// // Load result files -// const baselinePath = path.resolve(process.cwd(), options.baseline); -// const currentPath = path.resolve(process.cwd(), options.current); - -// if (!fs.existsSync(baselinePath)) { -// logger.error(`Baseline file not found: ${baselinePath}`); -// process.exit(1); -// } - -// if (!fs.existsSync(currentPath)) { -// logger.error(`Current file not found: ${currentPath}`); -// process.exit(1); -// } - -// const baselineContent = fs.readFileSync(baselinePath, 'utf-8'); -// const currentContent = fs.readFileSync(currentPath, 'utf-8'); - -// const baseline = JSON.parse(baselineContent); -// const current = JSON.parse(currentContent); - -// const geminiApiKey = getGeminiApiKey(); -// const openaiApiKey = getOpenaiApiKey(); - -// // Initialize models and embeddings -// const defaultLLM = new ChatGoogleGenerativeAI({ -// temperature: 0.7, -// apiKey: geminiApiKey, -// modelName: 'gemini-2.0-flash', -// }); - -// const llmConfig = { -// defaultLLM: defaultLLM as unknown as BaseChatModel, -// fastLLM: defaultLLM as unknown as BaseChatModel, -// evaluationLLM: defaultLLM as unknown as BaseChatModel, -// }; - -// const embeddings = new OpenAIEmbeddings({ -// openAIApiKey: openaiApiKey, -// modelName: 'text-embedding-3-large', -// dimensions: 2048, -// }) as unknown as Embeddings; - -// // Initialize vector store -// const dbConfig = getVectorDbConfig(); -// const vectorStore = await VectorStore.getInstance(dbConfig, embeddings); - -// const focus_to_agent_name: Record< -// DocumentSource | 'starknet_ecosystem', -// AvailableAgents -// > = { -// [DocumentSource.CAIRO_BOOK]: 'cairoBook', -// [DocumentSource.STARKNET_DOCS]: 'starknetDocs', -// starknet_ecosystem: 'starknetEcosystem', -// [DocumentSource.STARKNET_FOUNDRY]: 'starknetFoundry', -// [DocumentSource.CAIRO_BY_EXAMPLE]: 'cairoByExample', -// [DocumentSource.OPENZEPPELIN_DOCS]: 'openZeppelinDocs', -// }; -// // Get agent configuration -// const agentConfig = getAgentConfig( -// focus_to_agent_name[focus], -// vectorStore, -// ); -// if (!agentConfig) { -// logger.error(`Agent configuration not found for focus: ${focus}`); -// process.exit(1); -// } - -// // Initialize DocQualityTester -// const tester = new DocQualityTester(llmConfig, embeddings, { -// ...agentConfig, -// vectorStore: {} as any, // Not needed for comparison -// sources: agentConfig.sources, -// }); - -// // Compare results -// logger.info(`Comparing documentation quality for ${focus}`); -// const report = await tester.compareResults( -// baseline.results, -// current.results, -// ); - -// // Output comparison -// if (options.output) { -// const outputPath = path.resolve(process.cwd(), 
options.output); -// fs.writeFileSync(outputPath, JSON.stringify(report, null, 2)); -// logger.info(`Comparison report saved to ${outputPath}`); -// } else { -// console.log('\nDocumentation Quality Comparison Report'); -// console.log('=====================================\n'); -// console.log(`Focus: ${report.results.focus}`); -// console.log(`Baseline Version: ${baseline.results.version}`); -// console.log(`Current Version: ${current.results.version}`); - -// console.log('\nSummary:'); -// console.log(report.summary); - -// console.log('\nMetric Changes:'); -// const baselineMetrics = baseline.results.metrics.overall; -// const currentMetrics = report.results.metrics.overall; - -// console.log( -// `- Relevance Score: ${baselineMetrics.percentAnswered.toFixed(2)} → ${currentMetrics.percentAnswered.toFixed(2)} (${baselineMetrics.percentAnswered > currentMetrics.percentAnswered ? '↓' : '↑'})`, -// ); -// console.log( -// `- Coverage Score: ${baselineMetrics.avgClarityScore.toFixed(2)} → ${currentMetrics.avgClarityScore.toFixed(2)} (${baselineMetrics.avgClarityScore > currentMetrics.avgClarityScore ? '↓' : '↑'})`, -// ); -// console.log( -// `- Answer Completeness: ${baselineMetrics.avgSourceAlignment.toFixed(2)} → ${currentMetrics.avgSourceAlignment.toFixed(2)} (${baselineMetrics.avgSourceAlignment > currentMetrics.avgSourceAlignment ? '↓' : '↑'})`, -// ); - -// if (report.recommendations.length > 0) { -// console.log('\nRecommendations:'); -// report.recommendations.forEach((rec, i) => { -// console.log( -// `${i + 1}. [${rec.priority.toUpperCase()}] ${rec.description}`, -// ); -// }); -// } - -// console.log( -// '\nFor full comparison report, use the --output option to save to file.', -// ); -// } -// } catch (error) { -// logger.error('Error comparing documentation quality results:', error); -// process.exit(1); -// } -// }); - -// program.parse(); diff --git a/packages/agents/src/types/index.ts b/packages/agents/src/types/index.ts index cbfb429e..c2995da1 100644 --- a/packages/agents/src/types/index.ts +++ b/packages/agents/src/types/index.ts @@ -7,7 +7,7 @@ export type AvailableAgents = 'cairoCoder'; export interface LLMConfig { defaultLLM: BaseChatModel; - fastLLM?: BaseChatModel; + fastLLM: BaseChatModel; } export interface VectorStoreConfig { diff --git a/packages/backend/__tests__/types/context.test.ts b/packages/backend/__tests__/unit/context.test.ts similarity index 100% rename from packages/backend/__tests__/types/context.test.ts rename to packages/backend/__tests__/unit/context.test.ts diff --git a/packages/backend/__tests__/server.test.ts b/packages/backend/__tests__/unit/server.test.ts similarity index 93% rename from packages/backend/__tests__/server.test.ts rename to packages/backend/__tests__/unit/server.test.ts index 6c3dbd96..6f4f1b67 100644 --- a/packages/backend/__tests__/server.test.ts +++ b/packages/backend/__tests__/unit/server.test.ts @@ -1,11 +1,11 @@ -import { createApplication } from '../src/server'; -import { Container } from '../src/config/context'; +import { createApplication } from '../../src/server'; +import { Container } from '../../src/config/context'; import express from 'express'; import { Server } from 'http'; import supertest from 'supertest'; describe('Server', () => { - jest.mock('../src/config/llm', () => ({ + jest.mock('../../src/config/llm', () => ({ initializeLLMConfig: jest.fn().mockResolvedValue({ defaultLLM: {}, fastLLM: {}, @@ -25,9 +25,8 @@ describe('Server', () => { getCairoByExampleDbConfig: jest.fn().mockReturnValue({}), })); - // 
-  jest.mock('../src/config/http', () => ({
+  jest.mock('../../src/config/http', () => ({
     initializeHTTP: jest.fn(),
   }));
 
diff --git a/packages/backend/package.json b/packages/backend/package.json
index 40024caf..4a83f31c 100644
--- a/packages/backend/package.json
+++ b/packages/backend/package.json
@@ -9,7 +9,8 @@
     "test": "jest",
     "test:watch": "jest --watch",
     "test:coverage": "jest --coverage",
-    "check-types": "tsc --noEmit"
+    "check-types": "tsc --noEmit",
+    "test:unit": "jest --config jest.config.js --testMatch=\"**/__tests__/unit/**/*.test.[jt]s?(x)\""
   },
   "dependencies": {
     "@cairo-coder/agents": "workspace:*",
diff --git a/packages/backend/src/app.ts b/packages/backend/src/app.ts
index 65a2aa1c..6e73f57f 100644
--- a/packages/backend/src/app.ts
+++ b/packages/backend/src/app.ts
@@ -10,7 +10,7 @@ process.on('uncaughtException', (err, origin) => {
 });
 
 process.on('unhandledRejection', (reason, promise) => {
-  logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`);
+  logger.error(`Unhandled Rejection at : ${promise}, reason: ${reason}`);
 });
 
 // Start the application
diff --git a/packages/ingester/__tests__/AsciiDocIngester.test.ts b/packages/ingester/__tests__/unit/AsciiDocIngester.test.ts
similarity index 98%
rename from packages/ingester/__tests__/AsciiDocIngester.test.ts
rename to packages/ingester/__tests__/unit/AsciiDocIngester.test.ts
index cd1cb2eb..48a8acf8 100644
--- a/packages/ingester/__tests__/AsciiDocIngester.test.ts
+++ b/packages/ingester/__tests__/unit/AsciiDocIngester.test.ts
@@ -3,8 +3,8 @@ import { BookChunk, DocumentSource } from '@cairo-coder/agents/types/index';
 import {
   AsciiDocIngester,
   AsciiDocIngesterConfig,
-} from '../src/ingesters/AsciiDocIngester';
-import { BookConfig, BookPageDto, ParsedSection } from '../src/utils/types';
+} from '../../src/ingesters/AsciiDocIngester';
+import { BookConfig, BookPageDto, ParsedSection } from '../../src/utils/types';
 
 // Create a concrete implementation of AsciiDocIngester for testing
 class TestAsciiDocIngester extends AsciiDocIngester {
diff --git a/packages/ingester/__tests__/IngesterFactory.test.ts b/packages/ingester/__tests__/unit/IngesterFactory.test.ts
similarity index 75%
rename from packages/ingester/__tests__/IngesterFactory.test.ts
rename to packages/ingester/__tests__/unit/IngesterFactory.test.ts
index d647453c..6c7643db 100644
--- a/packages/ingester/__tests__/IngesterFactory.test.ts
+++ b/packages/ingester/__tests__/unit/IngesterFactory.test.ts
@@ -1,18 +1,18 @@
-import { IngesterFactory } from '../src/IngesterFactory';
-import { CairoBookIngester } from '../src/ingesters/CairoBookIngester';
-import { StarknetDocsIngester } from '../src/ingesters/StarknetDocsIngester';
-import { StarknetFoundryIngester } from '../src/ingesters/StarknetFoundryIngester';
-import { CairoByExampleIngester } from '../src/ingesters/CairoByExampleIngester';
-import { OpenZeppelinDocsIngester } from '../src/ingesters/OpenZeppelinDocsIngester';
-import { BaseIngester } from '../src/BaseIngester';
+import { IngesterFactory } from '../../src/IngesterFactory';
+import { CairoBookIngester } from '../../src/ingesters/CairoBookIngester';
+import { StarknetDocsIngester } from '../../src/ingesters/StarknetDocsIngester';
+import { StarknetFoundryIngester } from '../../src/ingesters/StarknetFoundryIngester';
+import { CairoByExampleIngester } from '../../src/ingesters/CairoByExampleIngester';
+import { OpenZeppelinDocsIngester } from '../../src/ingesters/OpenZeppelinDocsIngester';
+import { BaseIngester } from '../../src/BaseIngester';
 import { DocumentSource } from '@cairo-coder/agents/types/index';
 
 // Mock the ingesters
-jest.mock('../src/ingesters/CairoBookIngester');
-jest.mock('../src/ingesters/StarknetDocsIngester');
-jest.mock('../src/ingesters/StarknetFoundryIngester');
-jest.mock('../src/ingesters/CairoByExampleIngester');
-jest.mock('../src/ingesters/OpenZeppelinDocsIngester');
+jest.mock('../../src/ingesters/CairoBookIngester');
+jest.mock('../../src/ingesters/StarknetDocsIngester');
+jest.mock('../../src/ingesters/StarknetFoundryIngester');
+jest.mock('../../src/ingesters/CairoByExampleIngester');
+jest.mock('../../src/ingesters/OpenZeppelinDocsIngester');
 
 describe('IngesterFactory', () => {
   beforeEach(() => {
diff --git a/packages/ingester/__tests__/MarkdownIngester.test.ts b/packages/ingester/__tests__/unit/MarkdownIngester.test.ts
similarity index 97%
rename from packages/ingester/__tests__/MarkdownIngester.test.ts
rename to packages/ingester/__tests__/unit/MarkdownIngester.test.ts
index 1b8ed1a0..98a59afd 100644
--- a/packages/ingester/__tests__/MarkdownIngester.test.ts
+++ b/packages/ingester/__tests__/unit/MarkdownIngester.test.ts
@@ -1,6 +1,6 @@
-import { BookPageDto, isInsideCodeBlock } from '../src/shared';
+import { BookPageDto, isInsideCodeBlock } from '../../src/shared';
 import { Document } from '@langchain/core/documents';
-import { MarkdownIngester } from '../src/ingesters/MarkdownIngester';
+import { MarkdownIngester } from '../../src/ingesters/MarkdownIngester';
 import { DocumentSource } from '@cairo-coder/agents/types/index';
 
 // Create a concrete implementation of the abstract MarkdownIngester for testing
diff --git a/packages/ingester/__tests__/contentUtils.test.ts b/packages/ingester/__tests__/unit/contentUtils.test.ts
similarity index 97%
rename from packages/ingester/__tests__/contentUtils.test.ts
rename to packages/ingester/__tests__/unit/contentUtils.test.ts
index 90e8fee7..13b27cea 100644
--- a/packages/ingester/__tests__/contentUtils.test.ts
+++ b/packages/ingester/__tests__/unit/contentUtils.test.ts
@@ -1,4 +1,4 @@
-import { createAnchor, isInsideCodeBlock } from '../src/utils/contentUtils';
+import { createAnchor, isInsideCodeBlock } from '../../src/utils/contentUtils';
 
 describe('createAnchor', () => {
   it('should handle undefined input', () => {
diff --git a/packages/ingester/__tests__/shared.test.ts b/packages/ingester/__tests__/unit/shared.test.ts
similarity index 94%
rename from packages/ingester/__tests__/shared.test.ts
rename to packages/ingester/__tests__/unit/shared.test.ts
index 4855b284..10b58adb 100644
--- a/packages/ingester/__tests__/shared.test.ts
+++ b/packages/ingester/__tests__/unit/shared.test.ts
@@ -1,4 +1,4 @@
-import { createAnchor } from '../src/utils/contentUtils';
+import { createAnchor } from '../../src/utils/contentUtils';
 
 describe('createAnchor', () => {
   it('should handle undefined input', () => {
diff --git a/packages/ingester/__tests__/vectorStoreUtils.test.ts b/packages/ingester/__tests__/unit/vectorStoreUtils.test.ts
similarity index 96%
rename from packages/ingester/__tests__/vectorStoreUtils.test.ts
rename to packages/ingester/__tests__/unit/vectorStoreUtils.test.ts
index 7336bd86..77723341 100644
--- a/packages/ingester/__tests__/vectorStoreUtils.test.ts
+++ b/packages/ingester/__tests__/unit/vectorStoreUtils.test.ts
@@ -1,4 +1,4 @@
-import { findChunksToUpdateAndRemove } from '../src/utils/vectorStoreUtils';
+import { findChunksToUpdateAndRemove } from '../../src/utils/vectorStoreUtils';
 import { Document } from '@langchain/core/documents';
 
 describe('findChunksToUpdateAndRemove', () => {
diff --git a/packages/ingester/jest.config.js b/packages/ingester/jest.config.js
index 864a15d9..8ddf4c7c 100644
--- a/packages/ingester/jest.config.js
+++ b/packages/ingester/jest.config.js
@@ -4,7 +4,12 @@ module.exports = {
   roots: ['<rootDir>/src', '<rootDir>/__tests__'],
   testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'],
   transform: {
-    '^.+\\.ts$': ['ts-jest'],
+    '^.+\\.ts$': [
+      'ts-jest',
+      {
+        tsconfig: 'tsconfig.test.json',
+      },
+    ],
   },
   moduleNameMapper: {
     '^@/(.*)$': '<rootDir>/src/$1',
diff --git a/packages/ingester/package.json b/packages/ingester/package.json
index af408c28..38bee6b3 100644
--- a/packages/ingester/package.json
+++ b/packages/ingester/package.json
@@ -3,6 +3,7 @@
   "version": "1.0.0",
   "scripts": {
     "test": "jest",
+    "test:unit": "jest --config jest.config.js --testMatch=\"**/__tests__/unit/**/*.test.[jt]s?(x)\"",
     "build": "tsc -p tsconfig.json",
     "generate-embeddings": "node ./dist/src/generateEmbeddings.js",
     "generate-embeddings:yes": "node ./dist/src/generateEmbeddings.js -y"
diff --git a/packages/ingester/tsconfig.test.json b/packages/ingester/tsconfig.test.json
new file mode 100644
index 00000000..877ede0a
--- /dev/null
+++ b/packages/ingester/tsconfig.test.json
@@ -0,0 +1,9 @@
+{
+  "extends": "./tsconfig.json",
+  "compilerOptions": {
+    "types": ["node", "jest"],
+    "esModuleInterop": true
+  },
+  "include": ["src", "__tests__"],
+  "exclude": ["node_modules"]
+}
diff --git a/scripts/database-connection.sh b/scripts/database-connection.sh
new file mode 100755
index 00000000..a57d18f6
--- /dev/null
+++ b/scripts/database-connection.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Prepare the request data for the API call (a simple query)
+REQUEST_DATA='{
+    "model": "gemini-2.5-flash",
+    "messages": [
+      {
+        "role": "user",
+        "content": "Hello"
+      }
+    ],
+    "temperature": 0.7
+  }'
+
+RESPONSE=$(curl -s -X POST http://localhost:3001/chat/completions \
+  -H "Content-Type: application/json" \
+  -d "$REQUEST_DATA")
+
+# Check the response for database errors
+if echo "$RESPONSE" | jq -e '.error' >/dev/null 2>&1; then
+  echo "❌ Database connection error detected"
+  echo "Error details:"
+  echo "$RESPONSE" | jq '.error'
+  exit 1
+else
+  echo "✅ Successfully connected to database via /chat/completions endpoint"
+  exit 0
+fi
\ No newline at end of file
diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh
new file mode 100755
index 00000000..f0d96979
--- /dev/null
+++ b/scripts/integration-tests.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Track failures so the workflow's $? check actually fails the job when any check fails
+FAILED=0
+
+docker ps
+
+docker exec postgresql pg_isready -U postgres -h localhost
+if [ $? -eq 0 ]; then
+  echo "✅ PostgreSQL is ready!"
+else
+  echo "❌ PostgreSQL is not ready"
+  FAILED=1
+fi
+
+docker exec backend ping -c 2 postgres
+if [ $? -eq 0 ]; then
+  echo "✅ Network connectivity to PostgreSQL works!"
+else
+  echo "❌ Network connectivity issue to PostgreSQL"
+  FAILED=1
+fi
+
+RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3001/ 2>/dev/null)
+if [ "$RESPONSE" == "200" ]; then
+  echo "✅ Backend API is working correctly!"
+else
+  echo "❌ Issue with backend API. HTTP code: $RESPONSE"
+  # Get more details about the error
+  echo "Error details:"
+  curl -v http://localhost:3001/
+  FAILED=1
+fi
+
+exit $FAILED
diff --git a/scripts/snak-test.sh b/scripts/snak-test.sh
new file mode 100755
index 00000000..f08303bc
--- /dev/null
+++ b/scripts/snak-test.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+pnpm run test:code-quality --force | tee test-output.log
+
+TEST_RESULT=${PIPESTATUS[0]}
+
+if [ $TEST_RESULT -ne 0 ]; then
+  echo "❌ API test failed with exit code $TEST_RESULT!"
+  exit 1
+else
+  echo "✅ API test passed!"
+fi
\ No newline at end of file
diff --git a/turbo.json b/turbo.json
index 4985fcad..a0909faa 100644
--- a/turbo.json
+++ b/turbo.json
@@ -20,6 +20,18 @@
       "dependsOn": ["^build"],
       "outputs": []
     },
+    "test:unit": {
+      "dependsOn": ["^build"],
+      "outputs": []
+    },
+    "doc-quality": {
+      "dependsOn": ["^build"],
+      "outputs": []
+    },
+    "test:code-quality": {
+      "dependsOn": ["^build"],
+      "outputs": []
+    },
     "start": {
       "dependsOn": ["build"],
       "cache": false,