From d3405f61edb3a3b03b811e4030c610c7e264e5cf Mon Sep 17 00:00:00 2001 From: yanfeng-li Date: Tue, 31 May 2022 17:21:18 +0800 Subject: [PATCH] [Docs] add some case --- .../huggingface/pipeline/nlp/bert/index.md | 2 +- mkdocs.yml | 50 +++++++++- pinferencia/frontend/templates/raw_request.py | 3 +- .../frontend/templates/url_image_to_image.py | 45 +++++++++ .../frontend/templates/url_image_to_text.py | 43 +++++++++ pinferencia/task.py | 4 + pinferencia/tools.py | 16 ++++ tests/conftest.py | 6 ++ .../template/test_url_image_to_image.py | 94 +++++++++++++++++++ .../template/test_url_image_to_text.py | 89 ++++++++++++++++++ tests/unittest/test_tools.py | 24 +++++ 11 files changed, 372 insertions(+), 4 deletions(-) create mode 100644 pinferencia/frontend/templates/url_image_to_image.py create mode 100644 pinferencia/frontend/templates/url_image_to_text.py create mode 100644 tests/unittest/test_frontend/template/test_url_image_to_image.py create mode 100644 tests/unittest/test_frontend/template/test_url_image_to_text.py diff --git a/docs/how-to-guides/huggingface/pipeline/nlp/bert/index.md b/docs/how-to-guides/huggingface/pipeline/nlp/bert/index.md index 0f022915..32ee5954 100644 --- a/docs/how-to-guides/huggingface/pipeline/nlp/bert/index.md +++ b/docs/how-to-guides/huggingface/pipeline/nlp/bert/index.md @@ -93,7 +93,7 @@ Run the service, and wait for it to load the model and start the server: ### Test the service -=== "Frontend" +=== "UI" Open http://127.0.0.1:8501, and the template `Text to Text` will be selected automatically. diff --git a/mkdocs.yml b/mkdocs.yml index f1cff09f..39b9e3e8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -90,8 +90,10 @@ plugins: # how-to guides How-to Guides: 实战指南 - Use Pinferencia with HuggingFace: 上线 HuggingFace 模型 + Use Pinferencia with HuggingFace: 上线HuggingFace模型 + Use Pinferencia with PaddlePaddle: 上线PaddlePaddle模型 HuggingFace: HuggingFace + PaddlePaddle: PaddlePaddle NLP Tasks: 自然语言处理 Install Dependencies: 安装依赖 @@ -105,6 +107,20 @@ plugins: Define Request and Response Schema: 定义请求和响应 Schema Custom Templates: 自定义模版 Custom Frontend: 自定义前端 + Image Tasks: 图像任务 + Image Generation: 图像生成 + Face Detection: 人脸检测 + Object Detection: 对象检测 + Semantic Segmentation: 语义分割 + Text Recognition: 文字识别 + + Text Tasks: 文本任务 + Emotion Detection Textcnn: 情绪分析 + Lexical analysis: 语法分析 + Simultaneous Translation: 同声传译 + Text Generation: 文字生成 + + Define Request and Response Schema: 定义请求和响应Schema # reference Reference: 参考文档 @@ -139,8 +155,10 @@ plugins: # how-to guides How-to Guides: 实战指南 - Use Pinferencia with HuggingFace: 上线 HuggingFace 模型 + Use Pinferencia with HuggingFace: 上线HuggingFace模型 + Use Pinferencia with PaddlePaddle: 上线PaddlePaddle模型 HuggingFace: HuggingFace + PaddlePaddle: PaddlePaddle NLP Tasks: 自然语言处理 Install Dependencies: 安装依赖 @@ -154,6 +172,20 @@ plugins: Define Request and Response Schema: 定义请求和响应 Schema Custom Templates: 自定义模版 Custom Frontend: 自定义前端 + Image Tasks: 图像任务 + Image Generation: 图像生成 + Face Detection: 人脸检测 + Object Detection: 对象检测 + Semantic Segmentation: 语义分割 + Text Recognition: 文字识别 + + Text Tasks: 文本任务 + Emotion Detection Textcnn: 情绪分析 + Lexical analysis: 语法分析 + Simultaneous Translation: 同声传译 + Text Generation: 文字生成 + + Define Request and Response Schema: 定义请求和响应Schema # reference Reference: 参考文档 @@ -234,6 +266,20 @@ nav: - Translation - Google T5: how-to-guides/huggingface/pipeline/nlp/translation/index.md - Vision Tasks: - Image Classification: how-to-guides/huggingface/pipeline/vision/index.md + - Use Pinferencia with PaddlePaddle: + - Install 
Dependencies: how-to-guides/paddlepaddle/dependencies/index.md + - Image Tasks: + - Image Classification: how-to-guides/paddlepaddle/modules/image/image_classification/index.md + - Image Generation: how-to-guides/paddlepaddle/modules/image/image_generation/index.md + - Face Detection: how-to-guides/paddlepaddle/modules/image/face_detection/index.md + # - Object Detection: how-to-guides/paddlepaddle/modules/image/object_detection/index.md + - Semantic Segmentation: how-to-guides/paddlepaddle/modules/image/semantic_segmentation/index.md + # - Text Recognition: how-to-guides/paddlepaddle/modules/image/text_recognition/index.md + - Text Tasks: + - Emotion Detection Textcnn: how-to-guides/paddlepaddle/modules/text/emotion_detection_textcnn/index.md + - Lexical analysis: how-to-guides/paddlepaddle/modules/text/lexical_analysis/index.md + # - Simultaneous Translation: how-to-guides/paddlepaddle/modules/text/simultaneous_translation/index.md + - Text Generation: how-to-guides/paddlepaddle/modules/text/text_generation/index.md - Define Request and Response Schema: how-to-guides/schema/index.md - Custom Templates: how-to-guides/custom-templates/index.md - Custom Frontend: how-to-guides/custom-frontend/index.md diff --git a/pinferencia/frontend/templates/raw_request.py b/pinferencia/frontend/templates/raw_request.py index 0a8f57a6..70437176 100644 --- a/pinferencia/frontend/templates/raw_request.py +++ b/pinferencia/frontend/templates/raw_request.py @@ -24,4 +24,5 @@ def render(self): with st.spinner("Wait for result"): prediction = self.predict(json.loads(raw_text), parse_data=False) st.write("Response") - st.json(prediction) + if prediction: + st.json(prediction) diff --git a/pinferencia/frontend/templates/url_image_to_image.py b/pinferencia/frontend/templates/url_image_to_image.py new file mode 100644 index 00000000..cfb3bc4a --- /dev/null +++ b/pinferencia/frontend/templates/url_image_to_image.py @@ -0,0 +1,45 @@ +import base64 +from io import BytesIO + +import streamlit as st +from PIL import Image + +from .base import BaseTemplate + + +class Template(BaseTemplate): + title = ( + 'Image ' + 'to ' + 'Image' + ) + + def render(self): + """Render the Streamlit Frontend""" + super().render() + + with st.form("Image Upload", clear_on_submit=True): + uploaded_file = st.file_uploader( + "Choose an image...", type=["jpg", "png", "jpeg"] + ) + st.form_submit_button("Upload and Run") + + col1, col2 = st.columns(2) + col1.markdown( + '
<h3 style="text-align: center;">Input</h3>
', + unsafe_allow_html=True, + ) + col2.markdown( + '
<h3 style="text-align: center;">Result</h3>
', + unsafe_allow_html=True, + ) + if uploaded_file is not None: + image = Image.open(uploaded_file) + col1.image(image, use_column_width=True) + base64_img_str = base64.b64encode(uploaded_file.getvalue()).decode() + with st.spinner("Waiting for result"): + prediction = self.auto_predict({"base64_img_str": base64_img_str}) + if isinstance(prediction, list) and prediction: + prediction = prediction[0] + result_image = Image.open(BytesIO(base64.b64decode(prediction))) + col2.image(result_image) diff --git a/pinferencia/frontend/templates/url_image_to_text.py b/pinferencia/frontend/templates/url_image_to_text.py new file mode 100644 index 00000000..69a69c35 --- /dev/null +++ b/pinferencia/frontend/templates/url_image_to_text.py @@ -0,0 +1,43 @@ +import base64 +import json + +import streamlit as st +from PIL import Image + +from .base import BaseTemplate +from .utils import display_text_prediction + + +class Template(BaseTemplate): + title = ( + 'Image ' + 'to ' + 'Text' + ) + + def render(self): + super().render() + with st.form("Image Upload", clear_on_submit=True): + uploaded_file = st.file_uploader( + "Choose an image...", type=["jpg", "png", "jpeg"] + ) + st.form_submit_button("Upload and Run") + + col1, col2 = st.columns(2) + col1.markdown( + '
<h3 style="text-align: center;">Input</h3>
', + unsafe_allow_html=True, + ) + col2.markdown( + '
<h3 style="text-align: center;">Result</h3>
', + unsafe_allow_html=True, + ) + if uploaded_file is not None: + image = Image.open(uploaded_file) + col1.image(image, use_column_width=True) + base64_img_str = base64.b64encode(uploaded_file.getvalue()).decode() + with st.spinner("Waiting for result"): + prediction = self.auto_predict({"base64_img_str": base64_img_str}) + if isinstance(prediction, dict): + prediction = json.dumps(prediction) + display_text_prediction(prediction, component=col2) diff --git a/pinferencia/task.py b/pinferencia/task.py index eb05b325..a1700f76 100644 --- a/pinferencia/task.py +++ b/pinferencia/task.py @@ -6,6 +6,8 @@ IMAGE_TO_IMAGE = "image_to_image" TEXT_TO_TEXT = "text_to_text" TEXT_TO_IMAGE = "text_to_image" +URL_IMAGE_TO_TEXT = "url_image_to_text" +URL_IMAGE_TO_IMAGE = "url_image_to_image" # Specific Task TRANSLATION = "translation" @@ -25,4 +27,6 @@ IMAGE_TO_IMAGE, TEXT_TO_TEXT, TEXT_TO_IMAGE, + URL_IMAGE_TO_TEXT, + URL_IMAGE_TO_IMAGE, ) diff --git a/pinferencia/tools.py b/pinferencia/tools.py index 7d168d7c..c90d22f1 100644 --- a/pinferencia/tools.py +++ b/pinferencia/tools.py @@ -1,4 +1,10 @@ import base64 + +try: + import cv2 +except ImportError: + pass +import numpy as np from io import BytesIO from PIL import Image @@ -12,3 +18,13 @@ def pil_image_to_base64_str(image: Image) -> str: def base64_str_to_pil_image(base64_str: str) -> Image: return Image.open(BytesIO(base64.b64decode(base64_str))) + + +def base64_str_to_cv2(base64_str: str) -> np.ndarray: + return cv2.imdecode( + np.fromstring(base64.b64decode(base64_str), np.uint8), cv2.IMREAD_COLOR + ) + + +def cv2_to_base64_str(image: np.ndarray) -> str: + return base64.b64encode(cv2.imencode(".jpg", image)[1].tostring()) diff --git a/tests/conftest.py b/tests/conftest.py index e572bd63..b59389bb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,4 +1,5 @@ import pytest +import numpy as np @pytest.fixture(scope="session") @@ -56,3 +57,8 @@ def sum_product_model_metadata(): @pytest.fixture def image_base64_string(): return "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAIAAAD9b0jDAAADEUlEQVR4nK1WzUrrQBQ+M9NJ0qRJCg1WiBu7cKeCSxGRbrp051bQrQu34gv4BvoEPkhfoNCFKELBhQhaa2z6l99zF+cyNyDW9t77LcJM5uSb78z5mQDMBWMMAKSUmqYBgK7rmqZxzgFAPaWU80n+wDRNGpTLZRqsrKyoVSGEMhBClEqlRXkVHMehgeu6QgjDMIp+GIZBqn+GEAIApJRKhfqSuGgVACqVyt8IBADLsopTwzCU1+p8FgJjLAzDIAiU2N3d3dPTUyJVZrquL8pomqYQYjgc9no9APA8DwAeHx9fXl5s21ZmpVKJMaYO5GdcXFwg4uvrK023t7eHwyEi0pRzrhwvCp8HIUStVkPE2Wy2s7Pjuq7rur1eL03Tk5MTstF13bIsKITu937fkSLi+/v729ubpmm+739+foZhmOc557zVakkpLcuKomg8HsPiCSCEOD4+TpLk/v6eQsEY63Q6cRx/fHwodY7jfK2ob5VmWVav10ul0vX1NdWoaZqu6zLGOOeGYdBOURR9jdK88trf359MJp1OJwzDarUaBAGdwM3NzWw2I2+iKCLVKoDzlFYqlUajYZpmu90GgCAIbNu2bVvTtDRNm82m53m6rlNKFRnnodVqIWK/37+8vDw4ONjY2PA87+7uDhGjKMrz/PDwkCyllEs0qsFgkCRJFEWIiIjdbhcR0zTNsgwR9/b2AKBcLi9RUQBwdHTU7XaJNAxDok7TtN1ub21tkQ3FbYmKAoB6ve77/tra2vr6+vn5+dPTUxzHV1dX1LGURmppP0Pls1Lh+/7DwwMinp2d0Rsqp6/4Nvqj0QgAOOeqxT0/P0+nUwCQUlarVcaYSqxFSQHANE06xDRNydNarZYkCWNsPB4jYpZlS5NOJhNKQCklJfl0OhVCrK6uJkkCAORBHMeLNhTHcVRvVtSj0Yhz3mw26dbL87xosCioAZODhmHc3t7GcYyIm5ubtKUQgnO+6MX3HQaDQb/fV1P1P/BPpADQaDQAwHEc9WPxf1CMOKVdcXXpfXzfh8KlpGmaEIJyS+EXXIc7GO0j1jYAAAAASUVORK5CYII=" # noqa + + +@pytest.fixture +def image_np_ndarray(): + return np.array([[1, 2], [3, 4]]) diff --git a/tests/unittest/test_frontend/template/test_url_image_to_image.py b/tests/unittest/test_frontend/template/test_url_image_to_image.py new file mode 100644 index 00000000..326d6184 --- /dev/null +++ b/tests/unittest/test_frontend/template/test_url_image_to_image.py @@ -0,0 +1,94 @@ +from unittest.mock import MagicMock, 
Mock + +import pytest +from PIL import Image + +from pinferencia.frontend.templates.url_image_to_image import Template, st + + +@pytest.mark.parametrize("upload", [True, False]) +@pytest.mark.parametrize( + "return_value_and_display_type", + [ + (["use image_base64_string"], "image"), + ], +) +@pytest.mark.parametrize( + "metadata", + [ + {}, + {"input_type": "str"}, + {"output_type": "str"}, + {"input_type": "list", "output_type": "str"}, + {"input_type": "str", "output_type": "list"}, + ], +) +def test_render( + upload, + return_value_and_display_type, + metadata, + image_base64_string, + image_byte, + monkeypatch, +): + return_value, display_type = return_value_and_display_type + if display_type == "image": + return_value = [image_base64_string] + + # magic mock for general purpose mock + st_mock = MagicMock() + + # mock the file uploader, return a byte object if upload is true + file_uploader_mock = Mock(return_value=image_byte if upload else None) + monkeypatch.setattr(st, "file_uploader", file_uploader_mock) + + # mock the upload form + monkeypatch.setattr(st, "form", st_mock.form) + monkeypatch.setattr(st, "form_submit_button", st_mock.form_submit_button) + + # mock the two columns + col1_mock = MagicMock() + col2_mock = MagicMock() + mock_colums = Mock(return_value=(col1_mock, col2_mock)) + monkeypatch.setattr(st, "columns", mock_colums) + + # general purpose mock + monkeypatch.setattr(st, "spinner", st_mock.spinner) + + # mock the model manager + model_manager = MagicMock() + model_manager.predict = Mock(return_value=return_value) + + # initialize and render the template + tmpl = Template(model_name="test", metadata=metadata, model_manager=model_manager) + tmpl.render() + + # assert file uploader is correctly called + assert file_uploader_mock.call_count == 1 + assert file_uploader_mock.call_args[1]["type"] == ["jpg", "png", "jpeg"] + + # assert spinner is called if upload is true + assert st_mock.spinner.call_count == (1 if upload else 0) + + # assert the correct method is called to display the result + assert col2_mock.image.call_count == (1 if upload else 0) + if col2_mock.image.called: + call_arg = col2_mock.image.call_args[0][0] + assert call_arg == Image.open(image_byte) + + # assert the model manager's predict is correctly called + assert model_manager.predict.call_count == (1 if upload else 0) + if model_manager.predict.called: + assert ( + model_manager.predict.call_args[1]["data"] + == [{"base64_img_str": image_base64_string}] + if metadata.get("input_type") == "list" + else image_base64_string + ) + assert model_manager.predict.call_args[1]["model_name"] == "test" + assert model_manager.predict.call_args[1]["version_name"] is None + + +def test_title(): + assert "image" in Template.title.lower() + assert "text" not in Template.title.lower() diff --git a/tests/unittest/test_frontend/template/test_url_image_to_text.py b/tests/unittest/test_frontend/template/test_url_image_to_text.py new file mode 100644 index 00000000..7d3f42bc --- /dev/null +++ b/tests/unittest/test_frontend/template/test_url_image_to_text.py @@ -0,0 +1,89 @@ +from unittest.mock import MagicMock, Mock + +import pytest + +from pinferencia.frontend.templates.url_image_to_text import Template, st + + +@pytest.mark.parametrize("upload", [True, False]) +@pytest.mark.parametrize( + "return_value_and_display_type", + [ + ("abc", "info"), + (["abc"], "info"), + ({"name": "abc"}, "info"), + ([[1, 2, 3]], "json"), + ([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]], "table"), + ], +) +def test_render( + upload, + 
return_value_and_display_type, + image_base64_string, + image_byte, + monkeypatch, +): + return_value, display_type = return_value_and_display_type + + # magic mock for general purpose mock + st_mock = MagicMock() + + # mock the file uploader, return a byte object if upload is true + file_uploader_mock = Mock(return_value=image_byte if upload else None) + monkeypatch.setattr(st, "file_uploader", file_uploader_mock) + + # mock the upload form + monkeypatch.setattr(st, "form", st_mock.form) + monkeypatch.setattr(st, "form_submit_button", st_mock.form_submit_button) + + # mock the two columns + col1_mock = MagicMock() + col2_mock = MagicMock() + mock_colums = Mock(return_value=(col1_mock, col2_mock)) + monkeypatch.setattr(st, "columns", mock_colums) + + # general purpose mock + monkeypatch.setattr(st, "spinner", st_mock.spinner) + + # mock the model manager + model_manager = MagicMock() + model_manager.predict = Mock(return_value=return_value) + + # initialize and render the template + tmpl = Template(model_name="test", model_manager=model_manager) + tmpl.render() + + # assert file uploader is correctly called + assert file_uploader_mock.call_count == 1 + assert file_uploader_mock.call_args[1]["type"] == ["jpg", "png", "jpeg"] + + # assert spinner is called if upload is true + assert st_mock.spinner.call_count == (1 if upload else 0) + + # assert the correct method is called to display the result + for field in ("json", "info", "table"): + module = getattr(col2_mock, field) + assert module.call_count == (1 if field == display_type and upload else 0) + if module.called: + call_arg = module.call_args[0][0] + if field == "table": + call_arg = call_arg.to_dict("records") + assert ( + call_arg == return_value[0] + if isinstance(return_value, list) + else return_value + ) + + # assert the model manager's predict is correctly called + assert model_manager.predict.call_count == (1 if upload else 0) + if model_manager.predict.called: + assert model_manager.predict.call_args[1]["data"] == [ + {"base64_img_str": image_base64_string} + ] + assert model_manager.predict.call_args[1]["model_name"] == "test" + assert model_manager.predict.call_args[1]["version_name"] is None + + +def test_title(): + assert "image" in Template.title.lower() + assert "text" in Template.title.lower() diff --git a/tests/unittest/test_tools.py b/tests/unittest/test_tools.py index 8be4215b..af9f3813 100644 --- a/tests/unittest/test_tools.py +++ b/tests/unittest/test_tools.py @@ -1,6 +1,30 @@ from pinferencia.tools import base64_str_to_pil_image, pil_image_to_base64_str +from pinferencia import tools def test_image_convesion(image_base64_string): image = base64_str_to_pil_image(image_base64_string) assert image_base64_string == pil_image_to_base64_str(image) + + +def test_base64_str_to_cv2(image_base64_string, image_np_ndarray, monkeypatch): + class CV2: + + IMREAD_COLOR = "default" + + def imdecode(*args, **kwargs): + return image_np_ndarray + + setattr(tools, "cv2", CV2()) + + assert tools.base64_str_to_cv2(image_base64_string).any() + + +def test_cv2_to_base64_str(image_np_ndarray): + class CV2: + def imencode(*args, **kwargs): + return image_np_ndarray + + setattr(tools, "cv2", CV2()) + + assert tools.cv2_to_base64_str(image_np_ndarray)
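
Below is a minimal usage sketch of the new `URL_IMAGE_TO_IMAGE` task together with the cv2 helpers added in `pinferencia/tools.py`, assuming the usual `Server.register(..., metadata={"task": ...})` flow. The `selfie2anime` name and the Gaussian blur standing in for a real PaddleHub model call are placeholders for illustration, not part of this patch:

```python
# app.py: hypothetical sketch, not included in this patch.
import cv2

from pinferencia import Server, task
from pinferencia.tools import base64_str_to_cv2, cv2_to_base64_str


def selfie2anime(data):
    """Handle the {"base64_img_str": ...} payload sent by the
    url_image_to_image frontend template and return a base64 JPEG."""
    # Depending on the declared input type, the payload may arrive as a
    # single dict or wrapped in a list; accept both.
    item = data[0] if isinstance(data, list) else data
    img = base64_str_to_cv2(item["base64_img_str"])  # np.ndarray in BGR order

    # Placeholder transformation; a real service would call a PaddleHub
    # module (for example a style-transfer model) here instead.
    result = cv2.GaussianBlur(img, (5, 5), 0)

    encoded = cv2_to_base64_str(result)
    # cv2_to_base64_str as written above returns base64-encoded bytes;
    # decode to str so the JSON response serializes cleanly.
    return encoded.decode() if isinstance(encoded, bytes) else encoded


service = Server()
service.register(
    model_name="selfie2anime",
    model=selfie2anime,
    metadata={"task": task.URL_IMAGE_TO_IMAGE},
)
```

With the task metadata set, the frontend should auto-select the new `Image to Image` template, send the uploaded image as a base64 string, and render the returned base64 image in the result column.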