@@ -0,0 +1,86 @@
+import cv2
+import supervision as sv
+from ultralytics import YOLO
+
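+# Open the default webcam (device index 0).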
+cap = cv2.VideoCapture(0)
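+
+# Segmentation model; the commented-out lines are alternative custom checkpoints.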
+#model = YOLO('best_magent_block.pt')
+model = YOLO('yolov8n-seg.pt')
+#model = YOLO('best0312.pt')
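+
+# ByteTrack assigns persistent tracker IDs to detections across frames.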
+byte_tracker = sv.ByteTrack()
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+    # Run segmentation and convert the result to supervision Detections.
+    result = model(frame)[0]
+    detections = sv.Detections.from_ultralytics(result)
+    detections_tracker = byte_tracker.update_with_detections(detections=detections)
+    #print(detections)
+
+    # Draw segmentation polygons on a copy of the frame.
+    polygon_annotator = sv.PolygonAnnotator()
+    annotated_frame = polygon_annotator.annotate(
+        scene=frame.copy(), detections=detections)
+
+    # Overlay the segmentation masks.
+    mask_annotator = sv.MaskAnnotator()
+    annotated_frame = mask_annotator.annotate(
+        scene=annotated_frame, detections=detections
+    )
+
+    # Mark each tracked detection with a dot, keyed to its tracker ID.
+    dot_annotator = sv.DotAnnotator()
+    annotated_frame = dot_annotator.annotate(
+        scene=annotated_frame, detections=detections_tracker)
+
+    # bounding_box_annotator = sv.BoundingBoxAnnotator()
+    # annotated_frame = bounding_box_annotator.annotate(
+    #     scene=annotated_frame, detections=detections)
+    #labels = [detections.confidence]
+
+    # Build "#<tracker_id> <class name> <confidence>" labels for the tracked detections.
+    labels = [
+        f"#{tracker_id} {result.names[class_id]} {confidence:.2f}"
+        for class_id, tracker_id, confidence
+        in zip(detections_tracker.class_id, detections_tracker.tracker_id, detections_tracker.confidence)
+    ]
+    # labels_con_id = [
+    #     f"{result.names[class_id]} {confidence:.2f}"
+    #     for class_id, confidence
+    #     in zip(detections.class_id, detections.confidence)
+    # ]
+
+    print(labels)
+
+    # Attach the tracker-ID labels next to each tracked detection.
+    label_annotator = sv.LabelAnnotator()
+    annotated_frame = label_annotator.annotate(
+        scene=annotated_frame,
+        detections=detections_tracker,
+        labels=labels
+    )
+
+    # Mark the center of mass of each detection with a small filled circle.
+    position = sv.Position.CENTER_OF_MASS
+    xy = detections.get_anchors_coordinates(anchor=position)
+    #print(xy)
+    for detection_idx in range(len(detections)):
+        center = (int(xy[detection_idx, 0]), int(xy[detection_idx, 1]))
+        cv2.circle(annotated_frame, center, radius=3, color=(255, 255, 0), thickness=-1)
+
+    cv2.imshow('frame', annotated_frame)
+
+    # Quit when the user presses "q".
+    if cv2.waitKey(1) == ord('q'):
+        break
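+
+# Not in the original patch: release the capture device and close the OpenCV window on exit.
+cap.release()
+cv2.destroyAllWindows()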