Skip to content

Commit

Permalink
change from openai to google gemini
Browse files Browse the repository at this point in the history
  • Loading branch information
thiago-felipe-99 committed Dec 19, 2023
1 parent 4117c97 commit 70a6135
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 80 deletions.
32 changes: 12 additions & 20 deletions pipelines/deteccao_alagamento_cameras/flooding_detection/flows.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,21 +7,18 @@
from prefect.run_configs import KubernetesRun
from prefect.storage import GCS
from prefect.utilities.edges import unmapped
from prefeitura_rio.pipelines_utils.custom import Flow

from pipelines.constants import constants
from pipelines.deteccao_alagamento_cameras.flooding_detection.schedules import (
update_flooding_data_schedule,
)

from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import (
get_last_update,
get_api_key,
get_prediction,
get_snapshot,
pick_cameras,
update_flooding_api_data,
)
from prefeitura_rio.pipelines_utils.custom import Flow
from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import get_api_key
from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import get_last_update
from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import get_prediction
from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import get_snapshot
from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import pick_cameras
from pipelines.deteccao_alagamento_cameras.flooding_detection.tasks import update_flooding_api_data

# Flow

Expand All @@ -43,12 +40,8 @@
"mocked_cameras_number",
default=0,
)
openai_api_max_tokens = Parameter("openai_api_max_tokens", default=300)
openai_api_model = Parameter("openai_api_model", default="gpt-4-vision-preview")
openai_api_url = Parameter(
"openai_api_url",
default="https://api.openai.com/v1/chat/completions",
)
google_api_max_output_tokens = Parameter("google_api_max_output_tokens", default=300)
google_api_model = Parameter("google_api_model", default="gemini-pro-vision")
api_key_secret_path = Parameter("api_key_secret_path", required=True)
rain_api_data_url = Parameter(
"rain_api_url",
Expand Down Expand Up @@ -84,10 +77,9 @@
)
cameras_with_image_and_classification = get_prediction.map(
camera_with_image=cameras_with_image,
openai_api_model=unmapped(openai_api_model),
api_key=unmapped(api_key),
openai_api_max_tokens=unmapped(openai_api_max_tokens),
openai_api_url=unmapped(openai_api_url),
google_api_key=unmapped(api_key),
google_api_model=unmapped(google_api_model),
google_api_max_output_tokens=unmapped(google_api_max_output_tokens),
)
update_flooding_api_data(
cameras_with_image_and_classification=cameras_with_image_and_classification,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,8 @@
"cameras_geodf_url": "https://docs.google.com/spreadsheets/d/122uOaPr8YdW5PTzrxSPF-FD0tgco596HqgB7WK7cHFw/edit#gid=1580662721", # noqa
"mocked_cameras_number": 0,
"api_key_secret_path": "/flooding-detection",
"openai_api_max_tokens": 300,
"openai_api_model": "gpt-4-vision-preview",
"openai_api_url": "https://api.openai.com/v1/chat/completions",
"google_api_max_output_tokens": 300,
"google_api_model": "gemini-pro-vision",
"rain_api_update_url": "https://api.dados.rio/v2/clima_pluviometro/ultima_atualizacao_precipitacao_15min/", # noqa
"rain_api_url": "https://api.dados.rio/v2/clima_pluviometro/precipitacao_15min/",
"redis_key_flooding_detection_data": "flooding_detection_data",
Expand Down
106 changes: 49 additions & 57 deletions pipelines/deteccao_alagamento_cameras/flooding_detection/tasks.py
Original file line number Diff line number Diff line change
@@ -1,32 +1,33 @@
# -*- coding: utf-8 -*-
import base64
from datetime import datetime, timedelta
import io
import json
from pathlib import Path
import random
from typing import Dict, List, Tuple, Union
from datetime import datetime
from datetime import timedelta
from pathlib import Path
from typing import Dict
from typing import List
from typing import Union

import cv2
import geopandas as gpd
import google.generativeai as genai
import numpy as np
import pandas as pd
import pendulum
import requests
from PIL import Image
from prefect import task
import requests
from prefeitura_rio.pipelines_utils.infisical import get_secret
from prefeitura_rio.pipelines_utils.logging import log
from prefeitura_rio.pipelines_utils.redis_pal import get_redis_client
from shapely.geometry import Point

from pipelines.deteccao_alagamento_cameras.flooding_detection.utils import (
download_file,
redis_add_to_prediction_buffer,
redis_get_prediction_buffer,
)

from pipelines.deteccao_alagamento_cameras.flooding_detection.utils import download_file
from pipelines.deteccao_alagamento_cameras.flooding_detection.utils import redis_add_to_prediction_buffer
from pipelines.deteccao_alagamento_cameras.flooding_detection.utils import redis_get_prediction_buffer
# get_vault_secret
from prefeitura_rio.pipelines_utils.infisical import get_secret
from prefeitura_rio.pipelines_utils.redis_pal import get_redis_client
from prefeitura_rio.pipelines_utils.logging import log


@task
Expand Down Expand Up @@ -69,13 +70,12 @@ def get_api_key(secret_path: str, secret_name: str = "GEMINI-PRO-VISION-API-KEY"
def get_prediction(
camera_with_image: Dict[str, Union[str, float]],
flooding_prompt: str,
api_key: str,
openai_api_model: str,
openai_api_max_tokens: int = 300,
openai_api_url: str = "https://api.openai.com/v1/chat/completions",
google_api_key: str,
google_api_model: str,
google_api_max_output_tokens: int = 300,
) -> Dict[str, Union[str, float, bool]]:
"""
Gets the flooding detection prediction from OpenAI API.
Gets the flooding detection prediction from Google Gemini API.
Args:
camera_with_image: The camera with image in the following format:
Expand All @@ -88,10 +88,10 @@ def get_prediction(
"attempt_classification": True,
}
flooding_prompt: The flooding prompt.
openai_api_key: The OpenAI API key.
openai_api_model: The OpenAI API model.
openai_api_max_tokens: The OpenAI API max tokens.
openai_api_url: The OpenAI API URL.
        google_api_key: The Google Gemini API key.
        google_api_model: The Google Gemini API model (e.g. "gemini-pro-vision").
        google_api_max_output_tokens: The maximum number of output tokens for the Gemini API.
Returns: The camera with image and classification in the following format:
{
Expand Down Expand Up @@ -130,49 +130,41 @@ def get_prediction(
}
]
return camera_with_image
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
}
payload = {
"model": openai_api_model,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": flooding_prompt,
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{camera_with_image['image_base64']}"
},
},
],
}
],
"max_tokens": openai_api_max_tokens,
}
response = requests.post(openai_api_url, headers=headers, json=payload)
data: dict = response.json()
if data.get("error"):
flooding_detected = None
log(f"Failed to get prediction: {data['error']}")
else:
content: str = data["choices"][0]["message"]["content"]
json_string = content.replace("```json\n", "").replace("\n```", "")
json_object = json.loads(json_string)
flooding_detected = json_object["flooding_detected"]

flooding_detected = None

try:
genai.configure(api_key=google_api_key)
model = genai.GenerativeModel(google_api_model)
responses = model.generate_content(
contents=[flooding_prompt, camera_with_image["image_base64"]],
generation_config={
"max_output_tokens": google_api_max_output_tokens,
"temperature": 0.4,
"top_p": 1,
"top_k": 32
},
stream=True,
)

responses.resolve()

json_string = responses.text.replace("```json\n", "").replace("\n```", "")
flooding_detected = json.loads(json_string)["flooding_detected"]

log(f"Successfully got prediction: {flooding_detected}")

except Exception as e:
log(f"Failed to get prediction: {e}")

camera_with_image["ai_classification"] = [
{
"object": "alagamento",
"label": flooding_detected,
"confidence": 0.7,
}
]

return camera_with_image


Expand Down

0 comments on commit 70a6135

Please sign in to comment.