
Upload files to ''

fatwolf committed 8 months ago
commit 47e32a8f2e
3 changed files with 74 additions and 0 deletions
  1. seg_magnet.yaml (+7 -0)
  2. segment_test_sv.py (+67 -0)
  3. yolov8n-seg.pt (BIN)

+ 7 - 0
seg_magnet.yaml

@@ -0,0 +1,7 @@
+names:
+  0: Magnet
+  1: Block
+
+path: D:\fatwolf\company_files\python_code\planting_container\greenbeans\JSON2YOLO-master\JSON2YOLO-master
+train: D:\fatwolf\company_files\python_code\planting_container\greenbeans\JSON2YOLO-master\JSON2YOLO-master\train\images
+val: D:\fatwolf\company_files\python_code\planting_container\greenbeans\JSON2YOLO-master\JSON2YOLO-master\val\images
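
The YAML above is an Ultralytics-style segmentation dataset config: class names plus the dataset root and the train/val image folders. A minimal sketch of how such a config is typically consumed for training, assuming the ultralytics package and the yolov8n-seg.pt checkpoint added in this commit (the epoch count and image size are illustrative placeholders, not values from the repo):

from ultralytics import YOLO

# Start from the pretrained segmentation checkpoint committed alongside the config
model = YOLO('yolov8n-seg.pt')

# Train on the dataset described by seg_magnet.yaml; hyperparameters here are placeholders
model.train(data='seg_magnet.yaml', epochs=100, imgsz=640)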

+ 67 - 0
segment_test_sv.py

@@ -0,0 +1,67 @@
+import cv2
+import supervision as sv
+from ultralytics import YOLO
+
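+# Open the default webcam (device 0)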
+cap = cv2.VideoCapture(0)
+#model = YOLO('best_magent_block.pt')
+model = YOLO('yolov8n-seg.pt')
+#model = YOLO('best0312.pt')
+byte_tracker = sv.ByteTrack()
+
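+# Read frames continuously: detect, track, annotate, and display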
+while True:
+    # Grab a frame from the webcam; only run inference if the read succeeded
+    ret, frame = cap.read()
+
+    if ret:
+        # Run segmentation on the frame and convert the result to supervision Detections
+        result = model(frame)[0]
+        detections = sv.Detections.from_ultralytics(result)
+        # Update ByteTrack so each detection carries a persistent tracker_id
+        detections_tracker = byte_tracker.update_with_detections(detections=detections)
+        #print(detections)
+
+        # Outline each instance mask with its polygon
+        polygon_annotator = sv.PolygonAnnotator()
+        annotated_frame = polygon_annotator.annotate(
+            scene=frame.copy(), detections=detections)
+
+        # Overlay the filled segmentation masks
+        mask_annotator = sv.MaskAnnotator()
+        annotated_frame = mask_annotator.annotate(
+            scene=annotated_frame, detections=detections
+        )
+
+        # Mark each tracked detection with a dot
+        dot_annotator = sv.DotAnnotator()
+        annotated_frame = dot_annotator.annotate(
+            scene=annotated_frame, detections=detections_tracker)
+
+        # bounding_box_annotator = sv.BoundingBoxAnnotator()
+        # annotated_frame = bounding_box_annotator.annotate(
+        #     scene=annotated_frame, detections=detections)
+        #labels = [detections.confidence]
+
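+        # Compose "#tracker_id class confidence" labels for the tracked detections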
+        labels = [
+            f"#{tracker_id} {result.names[class_id]} {confidence:.2f}"
+            for class_id, tracker_id, confidence
+            in zip(detections_tracker.class_id, detections_tracker.tracker_id, detections_tracker.confidence)
+        ]
+        # labels_con_id = [
+        #     f"{result.names[class_id]} {confidence:.2f}"
+        #     for class_id, confidence
+        #     in zip(detections.class_id, detections.confidence)
+        # ]
+
+        print(labels)
+
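+        # Render the labels next to each tracked detection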
+        label_annotator = sv.LabelAnnotator()
+        annotated_frame = label_annotator.annotate(
+            scene=annotated_frame,
+            detections=detections_tracker,
+            labels=labels
+        )
+        # Draw a small circle at the center of mass of each detection
+        position = sv.Position.CENTER_OF_MASS
+        xy = detections.get_anchors_coordinates(anchor=position)
+        print(xy)
+        for detection_idx in range(len(detections)):
+            center = (int(xy[detection_idx, 0]), int(xy[detection_idx, 1]))
+            cv2.circle(annotated_frame, center, radius=3, color=(255, 255, 0), thickness=-1)
+        cv2.imshow('frame', annotated_frame)
+
+    # Quit on 'q'
+    if cv2.waitKey(1) == ord('q'):
+        break
+
+# Release the camera and close the display window
+cap.release()
+cv2.destroyAllWindows()

BIN
yolov8n-seg.pt