Adding polygon detection alongside bounding boxes #6506

Closed · wants to merge 47 commits

Commits (47)
202e62e
adding polygon yolo
Jan 25, 2022
ca9d901
last changes
Jan 27, 2022
6ad3f30
more cleaning
Jan 27, 2022
2dd00f1
added polygon_train bool to parse model
Jan 27, 2022
2d61899
modifying detect for yolo and polygon
Jan 28, 2022
9e8f8ed
polygon plot results
Jan 28, 2022
1828e48
dataset fix
Jan 31, 2022
6fb367d
merging latest yolo commits
Jan 31, 2022
2754448
Delete lpr.yaml
ahmad4633 Jan 31, 2022
6be048b
cleaning
Jan 31, 2022
218a731
last changes
Feb 2, 2022
38d936c
Merge remote-tracking branch 'origin/master' into test/polygon_yolo
Feb 2, 2022
3e16e13
fixing polygon_create_dataloader
Feb 2, 2022
3e974f2
fixing plot_results
Feb 2, 2022
bb7026a
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 2, 2022
ce3527b
Update requirements.txt
ahmad4633 Feb 2, 2022
fc6aa18
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 2, 2022
6f10192
Update detect.py
ahmad4633 Feb 2, 2022
183e2d3
fixing indents
ahmad4633 Feb 4, 2022
66a53f0
Merge remote-tracking branch 'origin/master' into test/polygon_yolo
ahmad4633 Feb 4, 2022
3f30d43
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 4, 2022
7d077d5
pre-commit.ci
ahmad4633 Feb 4, 2022
b0282f9
Merge branch 'test/polygon_yolo' of https://github.com/ahmad4633/yolo…
ahmad4633 Feb 4, 2022
7de5778
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 4, 2022
bdb0994
solving conflicts
ahmad4633 Feb 5, 2022
03235fb
Merge branch 'test/polygon_yolo' of https://github.com/ahmad4633/yolo…
ahmad4633 Feb 5, 2022
8385766
Merge branch 'master' into test/polygon_yolo
ahmad4633 Feb 5, 2022
957cf53
fixing ppe8
ahmad4633 Feb 7, 2022
4de79d1
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 7, 2022
f6ef69d
fixing pep8
ahmad4633 Feb 7, 2022
66a5401
Merge branch 'test/polygon_yolo' of https://github.com/ahmad4633/yolo…
ahmad4633 Feb 7, 2022
f832fa3
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 7, 2022
c114834
pep8 fixes
ahmad4633 Feb 7, 2022
80a5afd
Merge branch 'test/polygon_yolo' of https://github.com/ahmad4633/yolo…
ahmad4633 Feb 7, 2022
f52af58
pep8 fixes
ahmad4633 Feb 7, 2022
e15e5ad
plots_polygon.py:167: [F841] local variable 'c' is assigned to but ne…
ahmad4633 Feb 7, 2022
a6f385c
pep8 fix indents
ahmad4633 Feb 7, 2022
6b8e102
Update setup.py
ahmad4633 Feb 7, 2022
607bfec
fixing export
ahmad4633 Feb 14, 2022
85fcc88
Merge branch 'master' into test/polygon_yolo
ahmad4633 Feb 14, 2022
ded4625
Merge branch 'master' into test/polygon_yolo
ahmad4633 Feb 14, 2022
1101845
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 14, 2022
4dd66aa
pre commit fix
ahmad4633 Feb 14, 2022
e50fb5a
Merge branch 'test/polygon_yolo' of https://github.com/ahmad4633/yolo…
ahmad4633 Feb 14, 2022
410a8d2
fixed name 'MixConv2d' is not defined
ahmad4633 Feb 15, 2022
5cc70b6
adding polygon_load_image
ahmad4633 Feb 15, 2022
efb59a4
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 15, 2022
71 changes: 56 additions & 15 deletions detect.py
@@ -29,21 +29,23 @@
import sys
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative

import cv2
import torch
import torch.backends.cudnn as cudnn

from models.common import DetectMultiBackend
from models.experimental import attempt_load
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.general_polygon import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
increment_path, non_max_suppression, polygon_non_max_suppression,
polygon_scale_coords, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots_polygon import Annotator, colors, polygon_plot_one_box, save_one_box
from utils.torch_utils import select_device, time_sync


@@ -55,7 +57,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
@@ -74,7 +76,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
):
polygon=False):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
@@ -89,14 +91,27 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)

# Load model
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
if polygon:
model = attempt_load(weights, map_location=device) # load FP32 model
else:
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)

if polygon:
stride = int(model.stride.max()) # model stride
names = model.module.names if hasattr(model, 'module') else model.names # get class names
pt, jit, onnx, engine = True, False, False, False
else:
stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
imgsz = check_img_size(imgsz, s=stride) # check image size

# Half
half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA
if pt or jit:
model.model.half() if half else model.model.float()
if polygon:
# Polygon does not support second-stage classifier
classify = False
assert not classify, "polygon does not support second-stage classifier"

# Dataloader
if webcam:
@@ -110,7 +125,11 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
vid_path, vid_writer = [None] * bs, [None] * bs

# Run inference
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz), half=half) # warmup
if polygon:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz[0], imgsz[1]).to(device).type_as(next(model.parameters()))) # run once
else:
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz), half=half) # warmup
dt, seen = [0.0, 0.0, 0.0], 0
for path, im, im0s, vid_cap, s in dataset:
t1 = time_sync()
@@ -129,11 +148,15 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
dt[1] += t3 - t2

# NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
if polygon:
pred = polygon_non_max_suppression(pred[0], conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
else:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
dt[2] += time_sync() - t3

# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# pred = utils.general.apply_classifier(pred, classifier_model, im,
# im0s)

# Process predictions
for i, det in enumerate(pred): # per image
@@ -153,14 +176,31 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
if polygon:
det[:, :8] = polygon_scale_coords(im.shape[2:], det[:, :8], im0.shape).round()
else:
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()

# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string

# Write results
if polygon:
for *xyxyxyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xyxyxyxyn = (torch.tensor(xyxyxyxy).view(1, 8) / gn).view(-1).tolist() # normalized xyxyxyxy
line = (cls, *xyxyxyxyn, conf) if save_conf else (cls, *xyxyxyxyn) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')

if save_img or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
polygon_plot_one_box(torch.tensor(xyxyxyxy).cpu().numpy(), im0, label=label,
color=colors(c, True), line_thickness=line_thickness)
else:
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
@@ -241,6 +281,7 @@ def parse_opt():
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
parser.add_argument('--polygon', default=False, help='true to train polygon labels')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(FILE.stem, opt)
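The detect.py changes keep the default box pipeline and add a parallel polygon path: the model is loaded with attempt_load, predictions pass through polygon_non_max_suppression, coordinates are rescaled with polygon_scale_coords, and drawing is handled by polygon_plot_one_box from utils/plots_polygon.py, switched on by the new --polygon argument in parse_opt(). Note that as declared, --polygon takes a value (e.g. --polygon true) rather than acting as a store_true switch. As a rough, self-contained sketch of the data layout this path implies (each detection row read as x1, y1, x2, y2, x3, y3, x4, y4, conf, cls), the snippet below draws one quadrilateral and writes one normalized label line; draw_quad and save_quad_label are hypothetical stand-ins for illustration, not the PR's helpers.

```python
# Illustrative sketch only: draw_quad and save_quad_label are hypothetical helpers,
# not the polygon_plot_one_box / label-writing code added by this PR.
import cv2
import numpy as np


def draw_quad(im, quad, label=None, color=(0, 255, 0), thickness=2):
    """Draw a 4-point polygon given as 8 flat pixel coords (x1, y1, ..., x4, y4) on a BGR image."""
    pts = np.asarray(quad, dtype=np.int32).reshape(-1, 1, 2)  # shape (4, 1, 2) as cv2.polylines expects
    cv2.polylines(im, [pts], isClosed=True, color=color, thickness=thickness)
    if label:
        x, y = int(pts[0, 0, 0]), int(pts[0, 0, 1])
        cv2.putText(im, label, (x, max(y - 4, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 1, cv2.LINE_AA)
    return im


def save_quad_label(txt_path, cls, quad, im_w, im_h, conf=None):
    """Append one label line in the normalized format the diff writes: cls x1 y1 ... x4 y4 [conf]."""
    gain = np.array([im_w, im_h] * 4, dtype=np.float32)  # assumed per-coordinate normalization gain
    quad_n = (np.asarray(quad, dtype=np.float32) / gain).tolist()
    values = [int(cls), *quad_n] + ([float(conf)] if conf is not None else [])
    with open(txt_path, 'a') as f:
        f.write(' '.join(f'{v:g}' for v in values) + '\n')
```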
36 changes: 18 additions & 18 deletions export.py
@@ -130,7 +130,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print
# LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print

# Simplify
if simplify:
@@ -139,10 +139,9 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
import onnxsim

LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
model_onnx, check = onnxsim.simplify(
model_onnx,
dynamic_input_shape=dynamic,
input_shapes={'images': list(im.shape)} if dynamic else None)
model_onnx, check = onnxsim.simplify(model_onnx,
dynamic_input_shape=dynamic,
input_shapes={'images': list(im.shape)} if dynamic else None)
assert check, 'assert check failed'
onnx.save(model_onnx, f)
except Exception as e:
@@ -277,7 +276,8 @@ def export_saved_model(model, im, file, dynamic,


def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
# YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
# YOLOv5 TensorFlow GraphDef *.pb export
# https://github.com/leimao/Frozen_Graph_TensorFlow
try:
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
@@ -339,7 +339,8 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')):
LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" |\
sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
'sudo apt-get update',
'sudo apt-get install edgetpu-compiler']:
subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
@@ -377,16 +378,15 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):

json = open(f_json).read()
with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
subst = re.sub(
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}',
json)
subst = re.sub(r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}',
json)
j.write(subst)

LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
@@ -400,7 +400,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
weights=ROOT / 'yolov5s.pt', # weights path
imgsz=(640, 640), # image (height, width)
batch_size=1, # batch size
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
include=('torchscript', 'onnx'), # include formats
half=False, # FP16 half-precision export
inplace=False, # set YOLOv5 Detect() inplace=True
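The export.py edits are mostly line-wrapping of existing calls: the onnx-simplifier invocation, the Edge TPU apt-source command, and the TensorFlow.js Identity-renaming regex. For reference, a condensed sketch of the export-then-simplify flow that export_onnx() wraps is shown below, assuming an onnx-simplifier release (around 0.3.x) whose onnxsim.simplify accepts the dynamic_input_shape and input_shapes keywords seen in the diff; the function name export_onnx_simplified is illustrative, not the PR's code.

```python
# Condensed, illustrative version of the ONNX export + simplify step; not the PR's code.
import onnx
import onnxsim  # onnx-simplifier; dynamic_input_shape/input_shapes assume a ~0.3.x release
import torch


def export_onnx_simplified(model, im, f='model.onnx', opset=13, dynamic=False):
    """Export a torch model to ONNX, sanity-check it, then run onnx-simplifier in place."""
    torch.onnx.export(model, im, f, opset_version=opset,
                      input_names=['images'], output_names=['output'],
                      dynamic_axes={'images': {0: 'batch'}, 'output': {0: 'batch'}} if dynamic else None)
    model_onnx = onnx.load(f)              # reload the exported graph
    onnx.checker.check_model(model_onnx)   # structural validity check
    model_onnx, check = onnxsim.simplify(model_onnx,
                                         dynamic_input_shape=dynamic,
                                         input_shapes={'images': list(im.shape)} if dynamic else None)
    assert check, 'onnx-simplifier check failed'
    onnx.save(model_onnx, f)
    return f
```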
5 changes: 3 additions & 2 deletions hubconf.py
@@ -44,7 +44,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo

if pretrained and channels == 3 and classes == 80:
model = DetectMultiBackend(path, device=device) # download/load FP32 model
# model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model
# model = models.experimental.attempt_load(path,
# map_location=device) # download/load FP32 model
else:
cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
model = Model(cfg, channels, classes) # create model
@@ -122,7 +123,7 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr

if __name__ == '__main__':
model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
# model = custom(path='path/to/model.pt') # custom
# model = custom(path='path/to/model.pt') # custom

# Verify inference
from pathlib import Path
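The hubconf.py change only re-wraps two comments; the _create() entry point and the per-model wrappers are otherwise unchanged. A quick usage sketch of those entry points through torch.hub, as documented for the upstream repository, is shown below; the polygon models added in this PR are not exposed under a hub model name, so the call targets the standard yolov5s.

```python
# Standard torch.hub usage of the entry points defined in hubconf.py (upstream model names;
# this PR's polygon variant is not registered as a hub model).
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)  # resolves to _create('yolov5s', ...)
results = model('https://ultralytics.com/images/zidane.jpg')              # inference on a URL, file, or array
results.print()                                                           # per-image detection summary
```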