|
| 1 | +import streamlit as st |
| 2 | +import matplotlib.pyplot as plt |
| 3 | +import cv2 |
| 4 | +import numpy as np |
| 5 | +import pandas as pd |
| 6 | + |
def add_yolo():
    """Render the "Object Detection" Streamlit page.

    Builds the sidebar controls, lets the user pick a sample image,
    runs YOLOv3 on it, and displays the annotated result.
    """

    st.title("Object Detection")
    st.write("Object Detection is a field which consists in identifying objects in an image or a video feed. This task involves convolutional neural networks (CNNs), a special type of deep learning architecture. The algorithm presented below is YOLO (You Only Look Once), a state-of-the-art algorithm trained to identify thousands of object types.")
| 11 | + # This sidebar UI lets the user select parameters for the YOLO object detector. |
| 12 | + def object_detector_ui(): |
| 13 | + st.sidebar.markdown("# Model") |
| 14 | + confidence_threshold = st.sidebar.slider("Confidence threshold", 0.0, 1.0, 0.5, 0.01) |
| 15 | + return confidence_threshold #overlap_threshold |
| 16 | + |
| 17 | + # Draws an image with boxes overlayed to indicate the presence of cars, pedestrians etc. |
| 18 | + def draw_image_with_boxes(image, boxes): |
| 19 | + LABEL_COLORS = [0, 255, 0] |
| 20 | + image_with_boxes = image.astype(np.float64) |
| 21 | + for _, (xmin, ymin, xmax, ymax) in boxes.iterrows(): |
| 22 | + image_with_boxes[int(ymin):int(ymax),int(xmin):int(xmax),:] += LABEL_COLORS |
| 23 | + image_with_boxes[int(ymin):int(ymax),int(xmin):int(xmax),:] /= 2 |
| 24 | + |
| 25 | + st.image(image_with_boxes.astype(np.uint8), use_column_width=True) |
| 26 | + |
| 27 | + @st.cache(show_spinner=False) |
| 28 | + def load_present_image(img): |
| 29 | + image = cv2.imread(img, cv2.IMREAD_COLOR) |
| 30 | + image = image[:, :, [2, 1, 0]] # BGR -> RGB |
| 31 | + return image |
| 32 | + |
| 33 | + def yolo_v3(image, confidence_threshold=0.5, overlap_threshold=0.3): |
| 34 | + #@st.cache()allow_output_mutation=True |
| 35 | + def load_network(config_path, weights_path): |
| 36 | + net = cv2.dnn.readNetFromDarknet(config_path, weights_path) |
| 37 | + output_layer_names = net.getLayerNames() |
| 38 | + output_layer_names = [output_layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()] |
| 39 | + return net, output_layer_names |
| 40 | + |
| 41 | + net, output_layer_names = load_network("yolov3/yolov3.cfg", "yolov3.weights") |
| 42 | + |
| 43 | + blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False) |
| 44 | + net.setInput(blob) |
| 45 | + layer_outputs = net.forward(output_layer_names) |
| 46 | + |
| 47 | + boxes, confidences, class_IDs = [], [], [] |
| 48 | + H, W = image.shape[:2] |
| 49 | + |
| 50 | + for output in layer_outputs: |
| 51 | + for detection in output: |
| 52 | + scores = detection[5:] |
| 53 | + classID = np.argmax(scores) |
| 54 | + confidence = scores[classID] |
| 55 | + if confidence > confidence_threshold: |
| 56 | + box = detection[0:4] * np.array([W, H, W, H]) |
| 57 | + centerX, centerY, width, height = box.astype("int") |
| 58 | + x, y = int(centerX - (width / 2)), int(centerY - (height / 2)) |
| 59 | + boxes.append([x, y, int(width), int(height)]) |
| 60 | + confidences.append(float(confidence)) |
| 61 | + class_IDs.append(classID) |
| 62 | + |
| 63 | + f = open("yolov3/classes.txt", "r") |
| 64 | + f = f.readlines() |
| 65 | + f = [line.rstrip('\n') for line in list(f)] |
| 66 | + |
| 67 | + try: |
| 68 | + st.subheader("Detected objects: " + ', '.join(list(set([f[obj] for obj in class_IDs])))) |
| 69 | + except IndexError: |
| 70 | + st.write("Nothing detected") |
| 71 | + |
| 72 | + indices = cv2.dnn.NMSBoxes(boxes, confidences, confidence_threshold, overlap_threshold) |
| 73 | + |
| 74 | + xmin, xmax, ymin, ymax, labels = [], [], [], [], [] |
| 75 | + if len(indices) > 0: |
| 76 | + |
| 77 | + for i in indices.flatten(): |
| 78 | + |
| 79 | + x, y, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3] |
| 80 | + xmin.append(x) |
| 81 | + ymin.append(y) |
| 82 | + xmax.append(x+w) |
| 83 | + ymax.append(y+h) |
| 84 | + |
| 85 | + boxes = pd.DataFrame({"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}) |
| 86 | + return boxes[["xmin", "ymin", "xmax", "ymax"]] |
| 87 | + |
| 88 | + confidence_threshold = object_detector_ui() |
| 89 | + img_type = st.sidebar.selectbox("Select image type?", ['Cars', 'People', 'Animals', "Meeting"]) |
| 90 | + |
| 91 | + if img_type == 'People': |
| 92 | + image_url = "images/Group.jpg" |
| 93 | + elif img_type == 'Cars': |
| 94 | + image_url = "images/cars.jpg" |
| 95 | + elif img_type == 'Animals': |
| 96 | + image_url = "images/animal.jpg" |
| 97 | + elif img_type == 'Meeting': |
| 98 | + image_url = "images/Men.jpg" |
| 99 | + |
| 100 | + image = load_present_image(image_url) |
| 101 | + |
| 102 | + # Get the boxes for the objects detected by YOLO by running the YOLO model. |
| 103 | + yolo_boxes = yolo_v3(image, confidence_threshold) |
| 104 | + draw_image_with_boxes(image, yolo_boxes) |
0 commit comments