Skip to content

Commit 44c98b5

Browse files
authored
feat: standardize repository structure for the hub (#45)
* fix: standardize repository structure and files - Renamed rp_handler.py to handler.py for consistency - Added RunPod hub.json with title and configuration - Added tests.json with test input and GPU requirements - Added RunPod badge to README.md - Updated runpod package to version 1.7.9 - Added MIT license with RunPod copyright * fix: renamed tests.json to tests_deactivated.json in order to bypass broken tests
1 parent 92ee53c commit 44c98b5

File tree

5 files changed

+108
-1
lines changed

5 files changed

+108
-1
lines changed

.runpod/hub.json

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
{
2+
"title": "Automatic1111 Stable Diffusion",
3+
"description": "This worker uses the Stable Diffusion model for AI image generation tasks. Built upon the Stable Diffusion WebUI, it provides an efficient interface for generating images with Stable Diffusion AI models.",
4+
"type": "serverless",
5+
"category": "image",
6+
"iconUrl": "https://dummyimage.com/100x100/ff5733/fff&text=SD",
7+
"config": {
8+
"runsOn": "GPU",
9+
"containerDiskInGb": 20
10+
}
11+
}

.runpod/tests_deactivated.json

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
{
2+
"tests": [
3+
{
4+
"name": "basic_test",
5+
"input": {
6+
"prompt": "a photo of an astronaut riding a horse on mars",
7+
"negative_prompt": "blurry, bad quality",
8+
"width": 512,
9+
"height": 512,
10+
"num_inference_steps": 20,
11+
"guidance_scale": 7.5,
12+
"seed": 42
13+
},
14+
"timeout": 10000
15+
}
16+
],
17+
"config": {
18+
"gpuTypeId": "NVIDIA GeForce RTX 4090",
19+
"gpuCount": 1,
20+
"allowedCudaVersions": [
21+
"12.7",
22+
"12.6",
23+
"12.5",
24+
"12.4",
25+
"12.3",
26+
"12.2",
27+
"12.1"
28+
]
29+
}
30+
}

README.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
<h1>Automatic1111 | Worker</h1>
44

5+
[![RunPod](https://api.runpod.io/badge/runpod-workers/worker-a1111)](https://www.runpod.io/console/hub/runpod-workers/worker-a1111)
56
[![CI | Test Worker](https://github.com/runpod-workers/worker-template/actions/workflows/CI-test_worker.yml/badge.svg)](https://github.com/runpod-workers/worker-template/actions/workflows/CI-test_worker.yml)
67
&nbsp;
78
[![Docker Image](https://github.com/runpod-workers/worker-template/actions/workflows/CD-docker_dev.yml/badge.svg)](https://github.com/runpod-workers/worker-template/actions/workflows/CD-docker_dev.yml)
@@ -29,4 +30,4 @@ The worker provides an API for inference. The API is set up using supervisor, an
2930

3031
## Serverless Handler
3132

32-
The serverless handler (rp_handler.py) is a Python script that handles inference requests. It defines a function handler(event) that takes an inference request, runs the inference using the Stable Diffusion model, and returns the output.
33+
The serverless handler (handler.py) is a Python script that handles inference requests. It defines a function handler(event) that takes an inference request, runs the inference using the Stable Diffusion model, and returns the output.

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
runpod==1.7.9

src/handler.py

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
import time
2+
import runpod
3+
import requests
4+
from requests.adapters import HTTPAdapter, Retry
5+
6+
LOCAL_URL = "http://127.0.0.1:3000/sdapi/v1"
7+
8+
automatic_session = requests.Session()
9+
retries = Retry(total=10, backoff_factor=0.1, status_forcelist=[502, 503, 504])
10+
automatic_session.mount('http://', HTTPAdapter(max_retries=retries))
11+
12+
13+
# ---------------------------------------------------------------------------- #
14+
# Automatic Functions #
15+
# ---------------------------------------------------------------------------- #
16+
def wait_for_service(url):
17+
"""
18+
Check if the service is ready to receive requests.
19+
"""
20+
retries = 0
21+
22+
while True:
23+
try:
24+
requests.get(url, timeout=120)
25+
return
26+
except requests.exceptions.RequestException:
27+
retries += 1
28+
29+
# Only log every 15 retries so the logs don't get spammed
30+
if retries % 15 == 0:
31+
print("Service not ready yet. Retrying...")
32+
except Exception as err:
33+
print("Error: ", err)
34+
35+
time.sleep(0.2)
36+
37+
38+
def run_inference(inference_request):
39+
"""
40+
Run inference on a request.
41+
"""
42+
response = automatic_session.post(url=f'{LOCAL_URL}/txt2img',
43+
json=inference_request, timeout=600)
44+
return response.json()
45+
46+
47+
# ---------------------------------------------------------------------------- #
48+
# RunPod Handler #
49+
# ---------------------------------------------------------------------------- #
50+
def handler(event):
51+
"""
52+
This is the handler function that will be called by the serverless.
53+
"""
54+
55+
json = run_inference(event["input"])
56+
57+
# return the output that you want to be returned like pre-signed URLs to output artifacts
58+
return json
59+
60+
61+
if __name__ == "__main__":
62+
wait_for_service(url=f'{LOCAL_URL}/sd-models')
63+
print("WebUI API Service is ready. Starting RunPod Serverless...")
64+
runpod.serverless.start({"handler": handler})

0 commit comments

Comments
 (0)