Merge branch 'next_version' into mAP
bubbliiiing committed May 6, 2022
2 parents 78e87db + 1d314f1 commit 8ceca37
Showing 2 changed files with 63 additions and 25 deletions.
47 changes: 36 additions & 11 deletions get_map.py
@@ -10,11 +10,11 @@

if __name__ == "__main__":
'''
Unlike AP, Recall and Precision are not area-based metrics; at different threshold values, the network's Recall and Precision are different.
The Recall and Precision in the mAP results correspond to a confidence threshold of 0.5 at prediction time.
Unlike AP, Recall and Precision are not area-based metrics, so at different threshold (Confidence) values the network's Recall and Precision are different.
By default, the Recall and Precision computed by this script correspond to a threshold (Confidence) value of 0.5.
The number of boxes in the txt files under ./map_out/detection-results/ obtained here will be somewhat larger than with a direct predict, because the threshold here is low,
so that Recall and Precision can be computed under different threshold conditions, which is how the mAP is obtained.
Due to how mAP is calculated, the network needs to obtain nearly all of its predicted boxes when computing mAP, so that Recall and Precision under different threshold conditions can be computed.
Therefore, the number of boxes in the txt files under map_out/detection-results/ obtained by this script will generally be somewhat larger than with a direct predict; the goal is to list all possible predicted boxes,
'''
#------------------------------------------------------------------------------------------------------------------#
# map_mode specifies what this script computes when it is run
@@ -25,16 +25,41 @@
# map_mode 4: use the COCO toolbox to compute the 0.50:0.95 mAP of the current dataset. Requires obtaining the prediction results and the ground-truth boxes, and installing pycocotools.
#-------------------------------------------------------------------------------------------------------------------#
map_mode = 0
#-------------------------------------------------------#
#--------------------------------------------------------------------------------------#
# classes_path here specifies the classes for which the VOC mAP is measured
# In general it should be the same classes_path used for training and prediction
#-------------------------------------------------------#
#--------------------------------------------------------------------------------------#
classes_path = 'model_data/voc_classes.txt'
#-------------------------------------------------------#
# MINOVERLAP specifies the mAP0.x to compute
#--------------------------------------------------------------------------------------#
# MINOVERLAP specifies the mAP0.x to compute; look it up online if you are unsure what mAP0.x means.
# For example, to compute mAP0.75, set MINOVERLAP = 0.75.
#-------------------------------------------------------#
#
# When the overlap between a predicted box and a ground-truth box is greater than MINOVERLAP, the prediction is counted as a positive sample; otherwise it is a negative sample.
# Therefore, the larger MINOVERLAP is, the more accurately a box must be predicted to count as a positive sample, and the lower the resulting mAP value.
#--------------------------------------------------------------------------------------#
MINOVERLAP = 0.5
#--------------------------------------------------------------------------------------#
# Due to how mAP is calculated, the network needs to obtain nearly all of its predicted boxes when computing mAP; only then can the mAP be computed.
# Therefore, confidence should be set as low as possible in order to obtain all possible predicted boxes.
#
# This value is usually left unchanged. Because computing mAP requires nearly all predicted boxes, the confidence here should not be changed arbitrarily.
# To obtain Recall and Precision at a different threshold, modify score_threhold below.
#--------------------------------------------------------------------------------------#
confidence = 0.001
#--------------------------------------------------------------------------------------#
# The non-maximum suppression IoU used during prediction; larger values mean less strict NMS.
#
# This value is usually left unchanged.
#--------------------------------------------------------------------------------------#
nms_iou = 0.5
#---------------------------------------------------------------------------------------------------------------#
# Unlike AP, Recall and Precision are not area-based metrics, so at different threshold values the network's Recall and Precision are different.
#
# By default, the Recall and Precision computed by this script correspond to a threshold of 0.5 (defined here as score_threhold).
# Because computing mAP requires nearly all predicted boxes, the confidence defined above should not be changed arbitrarily.
# A separate score_threhold is defined here to represent the threshold, so that during the mAP computation the Recall and Precision corresponding to that threshold can be found.
#---------------------------------------------------------------------------------------------------------------#
score_threhold = 0.5
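As a point of reference, here is a minimal sketch (illustrative names, not code from this repository) of how the Recall and Precision reported at score_threhold fall out of the full set of low-confidence detections once each detection has been marked TP or FP:

import numpy as np

def precision_recall_at(scores, tp_flags, num_gt, score_threhold = 0.5):
    # Sort detections by descending confidence.
    order     = np.argsort(scores)[::-1]
    scores    = np.asarray(scores, dtype = float)[order]
    tp        = np.asarray(tp_flags, dtype = float)[order]
    fp        = 1.0 - tp
    tp_cum    = np.cumsum(tp)
    fp_cum    = np.cumsum(fp)
    recall    = tp_cum / max(num_gt, 1)
    precision = tp_cum / np.maximum(tp_cum + fp_cum, 1e-6)
    # confidence above is kept very low (0.001) so that nearly every predicted
    # box appears in these arrays; score_threhold only selects which point on
    # the curve gets reported.
    idx = max(int(np.sum(scores >= score_threhold)) - 1, 0)
    return recall[idx], precision[idx]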
#-------------------------------------------------------#
# map_vis specifies whether to enable visualization during the VOC mAP computation
#-------------------------------------------------------#
@@ -64,7 +89,7 @@

if map_mode == 0 or map_mode == 1:
print("Load model.")
yolo = YOLO(confidence = 0.001, nms_iou = 0.5)
yolo = YOLO(confidence = confidence, nms_iou = nms_iou)
print("Load model done.")

print("Get predict result.")
@@ -104,7 +129,7 @@

if map_mode == 0 or map_mode == 3:
print("Get map.")
get_map(MINOVERLAP, True, path = map_out_path)
get_map(MINOVERLAP, True, score_threhold = score_threhold, path = map_out_path)
print("Get map done.")

if map_mode == 4:
41 changes: 27 additions & 14 deletions utils/utils_map.py
@@ -267,7 +267,7 @@ def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, out
# close the plot
plt.close()

def get_map(MINOVERLAP, draw_plot, path = './map_out'):
def get_map(MINOVERLAP, draw_plot, score_threhold=0.5, path = './map_out'):
GT_PATH = os.path.join(path, 'ground-truth')
DR_PATH = os.path.join(path, 'detection-results')
IMG_PATH = os.path.join(path, 'images-optional')
@@ -423,12 +423,12 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
tp = [0] * nd
fp = [0] * nd
score = [0] * nd
score05_idx = 0
score_threhold_idx = 0
for idx, detection in enumerate(dr_data):
file_id = detection["file_id"]
score[idx] = float(detection["confidence"])
if score[idx] > 0.5:
score05_idx = idx
if score[idx] >= score_threhold:
score_threhold_idx = idx

if show_animation:
ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*")
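For context, a compact sketch of the IoU test that decides whether each of these detections becomes a TP or an FP (illustrative helper, not taken from utils_map.py; the real matching also tracks which ground-truth boxes have already been used):

def iou(box_a, box_b):
    # Boxes given as [left, top, right, bottom].
    inter_w = max(min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]), 0.0)
    inter_h = max(min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]), 0.0)
    inter   = inter_w * inter_h
    area_a  = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b  = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / max(area_a + area_b - inter, 1e-6)

# A detection counts as a TP only if its best IoU against an unmatched
# ground-truth box of the same class is >= MINOVERLAP; otherwise it is an FP.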
@@ -566,9 +566,9 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)

if len(prec)>0:
F1_text = "{0:.2f}".format(F1[score05_idx]) + " = " + class_name + " F1 "
Recall_text = "{0:.2f}%".format(rec[score05_idx]*100) + " = " + class_name + " Recall "
Precision_text = "{0:.2f}%".format(prec[score05_idx]*100) + " = " + class_name + " Precision "
F1_text = "{0:.2f}".format(F1[score_threhold_idx]) + " = " + class_name + " F1 "
Recall_text = "{0:.2f}%".format(rec[score_threhold_idx]*100) + " = " + class_name + " Recall "
Precision_text = "{0:.2f}%".format(prec[score_threhold_idx]*100) + " = " + class_name + " Precision "
else:
F1_text = "0.00" + " = " + class_name + " F1 "
Recall_text = "0.00%" + " = " + class_name + " Recall "
@@ -577,11 +577,12 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
rounded_prec = [ '%.2f' % elem for elem in prec ]
rounded_rec = [ '%.2f' % elem for elem in rec ]
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")

if len(prec)>0:
print(text + "\t||\tscore_threhold=0.5 : " + "F1=" + "{0:.2f}".format(F1[score05_idx])\
+ " ; Recall=" + "{0:.2f}%".format(rec[score05_idx]*100) + " ; Precision=" + "{0:.2f}%".format(prec[score05_idx]*100))
print(text + "\t||\tscore_threhold=" + str(score_threhold) + " : " + "F1=" + "{0:.2f}".format(F1[score_threhold_idx])\
+ " ; Recall=" + "{0:.2f}%".format(rec[score_threhold_idx]*100) + " ; Precision=" + "{0:.2f}%".format(prec[score_threhold_idx]*100))
else:
print(text + "\t||\tscore_threhold=0.5 : F1=0.00% ; Recall=0.00% ; Precision=0.00%")
print(text + "\t||\tscore_threhold=" + str(score_threhold) + " : " + "F1=0.00% ; Recall=0.00% ; Precision=0.00%")
ap_dictionary[class_name] = ap

n_images = counter_images_per_class[class_name]
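The F1 printed alongside these Recall and Precision values is the usual harmonic mean at the chosen score_threhold; with made-up numbers:

p, r = 0.90, 0.60            # precision and recall at score_threhold
f1   = 2 * p * r / (p + r)   # = 0.72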
@@ -607,7 +608,7 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
plt.cla()

plt.plot(score, F1, "-", color='orangered')
plt.title('class: ' + F1_text + "\nscore_threhold=0.5")
plt.title('class: ' + F1_text + "\nscore_threhold=" + str(score_threhold))
plt.xlabel('Score_Threhold')
plt.ylabel('F1')
axes = plt.gca()
@@ -617,7 +618,7 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
plt.cla()

plt.plot(score, rec, "-H", color='gold')
plt.title('class: ' + Recall_text + "\nscore_threhold=0.5")
plt.title('class: ' + Recall_text + "\nscore_threhold=" + str(score_threhold))
plt.xlabel('Score_Threhold')
plt.ylabel('Recall')
axes = plt.gca()
@@ -627,7 +628,7 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
plt.cla()

plt.plot(score, prec, "-s", color='palevioletred')
plt.title('class: ' + Precision_text + "\nscore_threhold=0.5")
plt.title('class: ' + Precision_text + "\nscore_threhold=" + str(score_threhold))
plt.xlabel('Score_Threhold')
plt.ylabel('Precision')
axes = plt.gca()
@@ -638,7 +639,9 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):

if show_animation:
cv2.destroyAllWindows()

if n_classes == 0:
print("未检测到任何种类,请检查标签信息与get_map.py中的classes_path是否修改。")
return 0
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP*100)
@@ -782,6 +785,7 @@ def get_map(MINOVERLAP, draw_plot, path = './map_out'):
plot_color,
""
)
return mAP

def preprocess_gt(gt_path, class_names):
image_ids = os.listdir(gt_path)
@@ -822,6 +826,8 @@ def preprocess_gt(gt_path, class_names):
class_name = class_name[:-1]

left, top, right, bottom = float(left), float(top), float(right), float(bottom)
if class_name not in class_names:
continue
cls_id = class_names.index(class_name) + 1
bbox = [left, top, right - left, bottom - top, difficult, str(image_id), cls_id, (right - left) * (bottom - top) - 10.0]
boxes_per_image.append(bbox)
@@ -867,6 +873,8 @@ def preprocess_dr(dr_path, class_names):
left, top, right, bottom = float(left), float(top), float(right), float(bottom)
result = {}
result["image_id"] = str(image_id)
if class_name not in class_names:
continue
result["category_id"] = class_names.index(class_name) + 1
result["bbox"] = [left, top, right - left, bottom - top]
result["score"] = float(confidence)
@@ -894,10 +902,15 @@ def get_coco_map(class_names, path):
with open(DR_JSON_PATH, "w") as f:
results_dr = preprocess_dr(DR_PATH, class_names)
json.dump(results_dr, f, indent=4)
if len(results_dr) == 0:
print("未检测到任何目标。")
return [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

cocoGt = COCO(GT_JSON_PATH)
cocoDt = cocoGt.loadRes(DR_JSON_PATH)
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()

return cocoEval.stats
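For reference, COCOeval.summarize() fills cocoEval.stats with twelve values (stats[0] is AP@[IoU=0.50:0.95], stats[1] is AP@0.50), which is why the no-detection early return above hands back a list of twelve zeros. A minimal caller sketch, with assumed class names and the default map_out layout:

stats = get_coco_map(class_names = ["cat", "dog"], path = "map_out")
print("AP@[0.50:0.95] = %.3f, AP@0.50 = %.3f" % (stats[0], stats[1]))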
