import cv2
import argparse
import time

import numpy as np
import supervision as sv
from ultralytics import YOLO

ZONE_POLYGON = np.array([
    [0, 0],
    [0.3, 0],
    [0.3, 1],
    [0, 1]
])


def parse_arguments() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="YOLOv8 live")
    parser.add_argument(
        "--webcam-resolution",
        default=[800, 600],
        nargs=2,
        type=int
    )
    args = parser.parse_args()
    return args


def main():
    args = parse_arguments()
    frame_width, frame_height = args.webcam_resolution
    # print(frame_width, frame_height)

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)

    # model = YOLO("yolov8n.pt")
    # load the model
    model = YOLO("best.pt")

    # bounding-box annotator settings
    box_annotator = sv.BoxAnnotator(
        thickness=2,
        text_thickness=2,
        text_scale=1
    )

    # polygon zone settings
    zone_polygon = (ZONE_POLYGON * np.array(args.webcam_resolution)).astype(int)
    zone = sv.PolygonZone(polygon=zone_polygon, frame_resolution_wh=tuple(args.webcam_resolution))
    zone_annotator = sv.PolygonZoneAnnotator(
        zone=zone,
        color=sv.Color.blue(),
        thickness=2,
        text_thickness=4,
        text_scale=2
    )

    # count: number of captured detections
    # FPS_count: number of processed frames, used to compute FPS
    # start_time: reference time for the FPS calculation
    count = 0
    FPS_count = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    color = (255, 0, 0)
    start_time = time.time()

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        result = model(frame, agnostic_nms=True, save_crop=False, save_conf=False)[0]
        detections = sv.Detections.from_yolov8(result)

        # detected class names with confidences
        labels = [
            f"{model.model.names[class_id]} {confidence:0.2f}"
            for _, confidence, class_id, _ in detections
        ]
        # detected class names only
        labels_name = [
            f"{model.model.names[class_id]}"
            for _, confidence, class_id, _ in detections
        ]
        # detection confidences only
        labels_confidence = [
            f"{confidence:0.2f}"
            for _, confidence, class_id, _ in detections
        ]

        # draw the bounding boxes on the frame
        frame = box_annotator.annotate(
            scene=frame,
            detections=detections,
            labels=labels
        )

        # grab the box confidences (scaled to percentages)
        boxes_confidence = result.boxes.conf
        boxes_confidence = boxes_confidence * 100
        print(boxes_confidence)

        # flag the detections that fall inside the zone
        mask = zone.trigger(detections=detections)

        # trigger() marks detections inside the zone as True. When the detected class is 'Fatwolf'
        # and the box confidence is at least 50%, increment the counter, crop the box from the
        # frame, save it as an image, and append a record to a text file.
        if mask.any():
            if not labels_name:
                continue
            elif labels_name == ['Fatwolf']:
                print('fatwolf')
                # count += 1
                # x1, y1, x2, y2: bounding-box coordinates
                x1 = result.boxes.xyxy[0][0]
                y1 = result.boxes.xyxy[0][1]
                x2 = result.boxes.xyxy[0][2]
                y2 = result.boxes.xyxy[0][3]
                if int(boxes_confidence[0]) >= 50:
                    count += 1
                    roi2 = frame[int(y1) + 4:int(y2) - 2, int(x1) + 4:int(x2) - 2]
                    now_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
                    save_pic_name = now_time + '_' + str(count) + '.jpg'
                    print("saved image:", save_pic_name, "confidence:", int(boxes_confidence[0]))
                    cv2.imwrite(save_pic_name, roi2)
                    path = 'output.txt'
                    with open(path, 'a') as f:
                        f.write(now_time + '_' + str(count) + '.jpg' + ' confidence: ' + str(boxes_confidence[0]) + '\n')
                else:
                    continue
            elif labels_name == ['Bottle']:
                print('Bottle')

        # draw the zone overlay on the frame
        frame = zone_annotator.annotate(scene=frame)

        # show the running count of captures inside the zone
        cv2.putText(frame, 'Count: {}'.format(count), (10, 50), font, 1, color, 2, cv2.LINE_AA)

        # compute the actual FPS and display it
        FPS_count += 1
        fps = FPS_count / (time.time() - start_time)
        cv2.putText(frame, 'FPS: {:.2f}'.format(fps), (frame.shape[1] - 200, 50), font, 1, color, 2, cv2.LINE_AA)

        cv2.imshow("yolov8", frame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()