Skip to content

Commit

Permalink
Merge pull request #4455 from broadinstitute/vlm-directory
Browse files Browse the repository at this point in the history
Initial VLM setup
  • Loading branch information
hanars authored Oct 30, 2024
2 parents 91ad846 + 568eec4 commit 0d3f34d
Show file tree
Hide file tree
Showing 10 changed files with 137 additions and 1 deletion.
11 changes: 11 additions & 0 deletions .cloudbuild/seqr-vlm-docker.cloudbuild.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Cloud Build config: builds the VLM service image with kaniko and pushes it
# to the seqr-project container registry under three tags.
steps:
# NOTE(review): kaniko executor v1.3.0 is quite old — confirm a newer
# release isn't preferred for this build
- name: 'gcr.io/kaniko-project/executor:v1.3.0'
  args:
  # The same image is pushed under the commit SHA, a branch tag, and latest
  - --destination=gcr.io/seqr-project/seqr-vlm:${COMMIT_SHA}
  # _CUSTOM_BRANCH_TAG is a user-defined substitution — presumably set by the
  # build trigger; verify it is defined there
  - --destination=gcr.io/seqr-project/seqr-vlm:${_CUSTOM_BRANCH_TAG}
  - --destination=gcr.io/seqr-project/seqr-vlm:latest
  - --dockerfile=vlm/deploy/Dockerfile
  # Reuse cached layers for up to one week (168h)
  - --cache=true
  - --cache-ttl=168h

timeout: 1800s  # 30-minute cap on the whole build
1 change: 1 addition & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ static/*
deploy/*
!deploy/docker/seqr/*
!deploy/docker/hail_search/*
!deploy/docker/vlm/*
hail_search/fixtures/*
.git
.vscode
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/docker-lint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ on:
paths:
- deploy/docker/seqr/Dockerfile
- hail_search/deploy/Dockerfile
- vlm/deploy/Dockerfile
- .hadolint.yaml
- .docker-compose.yaml
- .github/workflows/docker-lint.yaml
Expand All @@ -21,6 +22,7 @@ on:
paths:
- deploy/docker/seqr/Dockerfile
- hail_search/deploy/Dockerfile
- vlm/deploy/Dockerfile
- .hadolint.yaml
- .docker-compose.yaml
- .github/workflows/docker-lint.yaml
Expand All @@ -31,10 +33,13 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Validate docker compose
run: docker-compose -f docker-compose.yml config
run: docker compose -f docker-compose.yml config
- uses: hadolint/[email protected]
with:
dockerfile: deploy/docker/seqr/Dockerfile
- uses: hadolint/[email protected]
with:
dockerfile: hail_search/deploy/Dockerfile
- uses: hadolint/[email protected]
with:
dockerfile: vlm/deploy/Dockerfile
2 changes: 2 additions & 0 deletions .github/workflows/unit-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,13 @@ on:
- dev
paths-ignore:
- 'hail_search/**'
- 'vlm/**'
- '.github/workflows/hail-search-unit-tests.yaml'
pull_request:
types: [opened, synchronize, reopened]
paths-ignore:
- 'hail_search/**'
- 'vlm/**'
- '.github/workflows/hail-search-unit-tests.yaml'

jobs:
Expand Down
31 changes: 31 additions & 0 deletions .github/workflows/vlm-unit-tests.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
name: VLM Unit Tests

# Run the test suite on pushes (incl. merges) to master and dev
# Run the test suite when a PR is opened, pushed to, or reopened
on:
  push:
    branches:
      - master
      - dev
    paths:
      - 'vlm/**'
  pull_request:
    types: [opened, synchronize, reopened]
    paths:
      - 'vlm/**'

jobs:
  vlm:
    runs-on: ubuntu-latest
    # Run inside the same Hail image the service deploys on
    container: hailgenetics/hail:0.2.128

    steps:
      # checkout@v2 runs on a deprecated Node.js runtime; v4 is the
      # currently supported release
      - uses: actions/checkout@v4
      - name: Install dependencies
        # Use `python3 -m pip` consistently so both installs target the same
        # interpreter (bare `pip` may resolve to a different Python)
        run: |
          python3 -m pip install --upgrade pip wheel
          # NOTE(review): reuses the hail_search test requirements — confirm
          # vlm has no additional test dependencies of its own
          python3 -m pip install -r hail_search/requirements-test.txt
      - name: Run coverage tests
        # __main__.py is omitted: importing it starts the server
        run: |
          coverage run --source="./vlm" --omit="./vlm/__main__.py" -m pytest vlm/
          coverage report --fail-under=90
Empty file added vlm/__init__.py
Empty file.
22 changes: 22 additions & 0 deletions vlm/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from aiohttp import web
import logging

from vlm.web_app import init_web_app


async def status(request: web.Request) -> web.Response:
    """Health-check handler: always reports success."""
    # NOTE(review): appears unused — no route is registered in this module,
    # and vlm.web_app defines its own identical `status` handler; confirm
    # and consider removing this duplicate
    return web.json_response({'success': True})


def run():
    """Configure logging and serve the VLM app on all interfaces, port 6000.

    Blocks until the server is shut down.
    """
    logging.basicConfig(level=logging.INFO)
    # init_web_app is a coroutine; web.run_app awaits it before serving
    app = init_web_app()
    web.run_app(
        app,
        host='0.0.0.0',  # nosec
        # NOTE(review): 6000 is on browsers' unsafe-port list (X11) — fine
        # for a backend service, but confirm nothing browser-facing hits it
        port=6000,
        access_log_format='%a "%r" %s %Tfs',
    )


# Guard the entry point: the original called run() unconditionally, which
# started the server on ANY import of vlm.__main__ (e.g. by tooling).
# Behavior under `python -m vlm` is unchanged (__name__ == '__main__' there).
if __name__ == '__main__':
    run()
12 changes: 12 additions & 0 deletions vlm/deploy/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# VLM service image: Hail base image plus the vlm application package,
# launched as a Python module.
FROM hailgenetics/hail:0.2.128

LABEL maintainer="Broad TGG"

WORKDIR /vlm

# Application Code
COPY vlm/ .

# Run from / so the application is importable as the `vlm` package
WORKDIR /
# NOTE(review): 6000 is on browsers' unsafe-port list (X11) — acceptable for
# a backend service, but confirm nothing browser-facing connects directly
EXPOSE 6000
CMD ["python3", "-m", "vlm"]
19 changes: 19 additions & 0 deletions vlm/test_vlm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from aiohttp.test_utils import AioHTTPTestCase

from vlm.web_app import init_web_app


class VlmTestCase(AioHTTPTestCase):

async def get_application(self):
return await init_web_app()

async def test_error(self):
async with self.client.request('GET', '/foo') as resp:
self.assertEqual(resp.status, 404)

async def test_status(self):
async with self.client.request('GET', '/status') as resp:
self.assertEqual(resp.status, 200)
resp_json = await resp.json()
self.assertDictEqual(resp_json, {'success': True})
33 changes: 33 additions & 0 deletions vlm/web_app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
from aiohttp import web
import logging
import traceback

logger = logging.getLogger(__name__)


def _handle_exception(e, request):
    """Log the error against the requester's `From` header, then re-raise it."""
    requester = request.headers.get("From")
    logger.error(f'{requester} "{e}"')
    raise e


@web.middleware
async def error_middleware(request, handler):
    """Convert handler failures into logged HTTP error responses.

    HTTPErrors raised by handlers are logged and re-raised unchanged; any
    other exception is wrapped in a logged 500 response.
    """
    try:
        return await handler(request)
    except web.HTTPError as e:
        _handle_exception(e, request)
    except Exception as e:
        error_detail = f'{e}: {traceback.format_exc()}'
        # `reason` is sent on the HTTP status line, where newlines are
        # invalid — aiohttp raises ValueError for a reason containing '\n',
        # so the original reason=<traceback> crashed the 500 path itself.
        # Keep only the first line of the message as the reason and carry
        # the full traceback in the response body instead.
        _handle_exception(
            web.HTTPInternalServerError(
                reason=str(e).split('\n')[0], text=error_detail,
            ),
            request,
        )


async def status(request: web.Request) -> web.Response:
    """Health-check endpoint: always reports success as JSON."""
    payload = {'success': True}
    return web.json_response(payload)


async def init_web_app():
    """Create and configure the VLM aiohttp application.

    Installs the error-logging middleware, caps request bodies at 10 MiB,
    and registers the /status health-check route.
    """
    max_request_size = (1024 ** 2) * 10  # 10 MiB
    app = web.Application(
        middlewares=[error_middleware], client_max_size=max_request_size,
    )
    app.add_routes([web.get('/status', status)])
    return app

0 comments on commit 0d3f34d

Please sign in to comment.