l515_distance.py

import pyrealsense2 as rs
import numpy as np
import cv2
import supervision as sv
from ultralytics import YOLO
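# Dependencies (assumed pip package names): pyrealsense2, numpy,
# opencv-python, supervision, ultralytics.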
# Start the RealSense camera: 640x480 depth (z16) and color (bgr8) streams at 30 FPS
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)
# To tune the depth sensor, grab the active profile after start():
# profile = pipeline.get_active_profile()
# depth_sensor = profile.get_device().first_depth_sensor()
# depth_sensor.set_option(rs.option.visual_preset, 5)  # 5 is short range, 3 is low ambient light
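# Note: on the L515 these numeric presets correspond to the rs.l500_visual_preset
# enum (short_range = 5, low_ambient_light = 3); exact values can differ across
# librealsense versions, so treat this mapping as an assumption and verify it
# against your SDK.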
# Initialize the YOLO segmentation model and the ByteTrack tracker
model = YOLO('yolov8n-seg.pt')
byte_tracker = sv.ByteTrack()

# Create the annotators once, outside the frame loop
polygon_annotator = sv.PolygonAnnotator()
mask_annotator = sv.MaskAnnotator()
label_annotator = sv.LabelAnnotator()
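# The depth and color imagers sit at different positions on the camera, so a
# raw depth frame does not line up pixel-for-pixel with the color frame.
# rs.align reprojects the depth data into the color camera's viewpoint, which
# lets the same (x, y) coordinates index both images below.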
align_to = rs.stream.color
align = rs.align(align_to)
try:
    while True:
        # Read a synchronized frame pair from the RealSense camera
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
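        # Depth values are raw 16-bit units; convertScaleAbs with alpha=0.03
        # compresses them into 8 bits purely for visualization.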
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_WINTER)
        # Run object detection on the color frame
        result = model(color_image)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections_tracker = byte_tracker.update_with_detections(detections=detections)
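        # detections_tracker carries tracker_id values that persist across
        # frames, so the labels below stay attached to the same object.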
        # Draw polygons, masks, and tracking labels
        annotated_frame = polygon_annotator.annotate(
            scene=color_image.copy(), detections=detections)
        annotated_frame = mask_annotator.annotate(
            scene=annotated_frame, detections=detections)
        labels = [
            f"#{tracker_id} {result.names[class_id]} {confidence:.2f}"
            for class_id, tracker_id, confidence
            in zip(detections_tracker.class_id, detections_tracker.tracker_id, detections_tracker.confidence)
        ]
        annotated_frame = label_annotator.annotate(
            scene=annotated_frame,
            detections=detections_tracker,
            labels=labels
        )
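        # CENTER_OF_MASS is computed from the segmentation masks (which is why
        # a -seg model is loaded above), so the sampled pixel tends to land on
        # the object itself rather than on background inside the bounding box.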
        if detections:
            position = sv.Position.CENTER_OF_MASS
            xy = detections.get_anchors_coordinates(anchor=position)
            for detection_idx in range(len(detections)):
                x, y = int(xy[detection_idx, 0]), int(xy[detection_idx, 1])
                cv2.circle(annotated_frame, (x, y), radius=3, color=(255, 255, 0), thickness=-1)
                # Overlay the distance reading; stack one text line per
                # detection so they do not overwrite each other
                distance = depth_frame.get_distance(x, y)
                text_depth = f"Depth value at ({x},{y}): {distance:.2f} meters"
                annotated_frame = cv2.putText(annotated_frame, text_depth, (10, 20 + 20 * detection_idx),
                                              cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
                print("Distance:", distance)
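        # get_distance() returns meters, converting from the raw depth units
        # via the device depth scale; it returns 0.0 where the sensor has no
        # valid reading (holes, out-of-range surfaces).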
        # Show the annotated color view side by side with the depth colormap
        annotated_frame = np.hstack((annotated_frame, depth_colormap))
        cv2.imshow('RealSense', annotated_frame)
        key = cv2.waitKey(1)
        if key & 0xFF in (ord('q'), 27):  # 'q' or Esc quits
            cv2.destroyAllWindows()
            break
finally:
    pipeline.stop()