#===========================================================================#
#                                                                           #
#   Copyright (C) 2006 - 2018                                               #
#   IDS Imaging Development Systems GmbH                                    #
#   Dimbacher Str. 6-8                                                      #
#   D-74182 Obersulm, Germany                                               #
#                                                                           #
#   The information in this document is subject to change without notice   #
#   and should not be construed as a commitment by IDS Imaging Development #
#   Systems GmbH. IDS Imaging Development Systems GmbH does not assume any #
#   responsibility for any errors that may appear in this document.        #
#                                                                           #
#   This document, or source code, is provided solely as an example        #
#   of how to utilize IDS software libraries in a sample application.      #
#   IDS Imaging Development Systems GmbH does not assume any               #
#   responsibility for the use or reliability of any portion of this       #
#   document or the described software.                                    #
#                                                                           #
#   General permission to copy or modify, but not for profit, is hereby    #
#   granted, provided that the above copyright notice is included and      #
#   reference made to the fact that reproduction privileges were granted   #
#   by IDS Imaging Development Systems GmbH.                                #
#                                                                           #
#   IDS Imaging Development Systems GmbH cannot assume any responsibility  #
#   for the use or misuse of any portion of this software for other than   #
#   its intended diagnostic purpose in calibrating and testing IDS         #
#   manufactured cameras and software.                                      #
#                                                                           #
#===========================================================================#
# Developer Note: I tried to keep this as simple as possible.
# Therefore there are no checks for the newest driver version, no freeing of memory beforehand, etc.
# The sole purpose of this program is to show one of the simplest ways to interact with an IDS camera via the uEye API.
# (XS cameras are not supported)
#---------------------------------------------------------------------------------------------------------------------------------------
# coding=utf-8
# Libraries
from ctypes import *
from pyueye import ueye
import numpy as np
import cv2
import sys
import ctypes
import struct
import threading
import time
import datetime
import os
import pymysql
import tensorflow as tf
import requests as req
import mysql.connector
from mysql.connector import Error
from urllib import parse
from PIL import Image
#---------------------------------------------------------------------------------------------------------------------------------------
# Variables
hCam = ueye.HIDS(0)                       # 0: first available camera; 1-254: the camera with the specified camera ID
sInfo = ueye.SENSORINFO()
cInfo = ueye.CAMINFO()
pcImageMemory = ueye.c_mem_p()
MemID = ueye.int()
rectAOI = ueye.IS_RECT()
pitch = ueye.INT()
nBitsPerPixel = ueye.INT(24)              # 24: bits per pixel for color mode; use 8 bits per pixel for monochrome
channels = 3                              # 3: channels for color mode (RGB); use 1 channel for monochrome
m_nColorMode = ueye.INT()                 # Y8/RGB16/RGB24/RGB32
bytes_per_pixel = int(nBitsPerPixel / 8)
formatInfo = ueye.IMAGE_FORMAT_INFO()
now = ctypes.c_uint()
m_pcSeqImgMem = []
m_lSeqMemId = []
nNum = ueye.INT()
pcMem = ueye.c_mem_p()
pcMemLast = ueye.c_mem_p()
ImageFileParams = ueye.IMAGE_FILE_PARAMS()
ueye.is_FreeImageMem(hCam, pcImageMemory, MemID)
conn = pymysql.connect(host="127.0.0.1", port=3306, user='root', passwd='g53743001',
                       db='coffee_detection',
                       charset='utf8')
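# NOTE: the MySQL connection above uses hard-coded credentials for this demo setup.
# An alternative (not used here, shown only as a sketch) would be to read them from
# environment variables, e.g. host=os.getenv("DB_HOST", "127.0.0.1").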
#---------------------------------------------------------------------------------------------------------------------------------------
print("START")
#print()

# Starts the driver and establishes the connection to the camera
nRet = ueye.is_InitCamera(hCam, None)
if nRet != ueye.IS_SUCCESS:
    # Retry once if the first initialization attempt fails
    ueye.is_InitCamera(hCam, None)
    #print("is_InitCamera ERROR")

ueye.is_ParameterSet(hCam, ueye.IS_PARAMETERSET_CMD_LOAD_EEPROM, None, 0)

# Reads out the data hard-coded in the non-volatile camera memory and writes it to the data structure that cInfo points to
nRet = ueye.is_GetCameraInfo(hCam, cInfo)
if nRet != ueye.IS_SUCCESS:
    print("is_GetCameraInfo ERROR")

# You can query additional information about the sensor type used in the camera
nRet = ueye.is_GetSensorInfo(hCam, sInfo)
if nRet != ueye.IS_SUCCESS:
    print("is_GetSensorInfo ERROR")

# Set display mode to DIB
nRet = ueye.is_SetDisplayMode(hCam, ueye.IS_SET_DM_DIB)
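# In DIB (bitmap) mode the captured images are stored in system memory instead of being
# rendered to a display by the driver, so the frames can later be read directly into
# numpy arrays.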
# Set the right color mode
if int.from_bytes(sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_BAYER:
    # setup the color depth to the current windows setting
    ueye.is_GetColorDepth(hCam, nBitsPerPixel, m_nColorMode)
    bytes_per_pixel = int(nBitsPerPixel / 8)
    #print("IS_COLORMODE_BAYER: ", )
    #print("\tm_nColorMode: \t\t", m_nColorMode)
    #print("\tnBitsPerPixel: \t\t", nBitsPerPixel)
    #print("\tbytes_per_pixel: \t\t", bytes_per_pixel)
    #print()
elif int.from_bytes(sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_CBYCRY:
    # for color camera models use BGR8 mode (24 bit)
    m_nColorMode = ueye.IS_CM_BGR8_PACKED
    nBitsPerPixel = ueye.INT(24)
    bytes_per_pixel = int(nBitsPerPixel / 8)
    #print("IS_COLORMODE_CBYCRY: ", )
    #print("\tm_nColorMode: \t\t", m_nColorMode)
    #print("\tnBitsPerPixel: \t\t", nBitsPerPixel)
    #print("\tbytes_per_pixel: \t\t", bytes_per_pixel)
    #print()
elif int.from_bytes(sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_MONOCHROME:
    # for monochrome camera models use Y8 mode
    m_nColorMode = ueye.IS_CM_MONO8
    nBitsPerPixel = ueye.INT(8)
    bytes_per_pixel = int(nBitsPerPixel / 8)
    #print("IS_COLORMODE_MONOCHROME: ", )
    #print("\tm_nColorMode: \t\t", m_nColorMode)
    #print("\tnBitsPerPixel: \t\t", nBitsPerPixel)
    #print("\tbytes_per_pixel: \t\t", bytes_per_pixel)
    #print()
else:
    # fallback: use Y8 mode
    m_nColorMode = ueye.IS_CM_MONO8
    nBitsPerPixel = ueye.INT(8)
    bytes_per_pixel = int(nBitsPerPixel / 8)
    print("else")
ueye.is_SetColorMode(hCam, ueye.IS_CM_BGRA8_PACKED)
nBitsPerPixel = ueye.INT(32)
# Can be used to set the size and position of an "area of interest" (AOI) within an image
nRet = ueye.is_AOI(hCam, ueye.IS_AOI_IMAGE_GET_AOI, rectAOI, ueye.sizeof(rectAOI))
if nRet != ueye.IS_SUCCESS:
    print("is_AOI ERROR")

width = rectAOI.s32Width
height = rectAOI.s32Height

# Prints out some information about the camera and the sensor
#print("Camera model:\t\t", sInfo.strSensorName.decode('utf-8'))
#print("Camera serial no.:\t", cInfo.SerNo.decode('utf-8'))
#print("Maximum image width:\t", width)
#print("Maximum image height:\t", height)
#print()
#---------------------------------------------------------------------------------------------------------------------------------------
for i in range(1, 5):
    # Allocates an image memory for an image having its dimensions defined by width and
    # height and its color depth defined by nBitsPerPixel
    nRet = ueye.is_AllocImageMem(hCam, width, height, nBitsPerPixel, pcImageMemory, MemID)
    # m_pcSeqImgMem.append(pcImageMemory)
    # m_lSeqMemId.append(MemID)
    ueye.is_AddToSequence(hCam, pcImageMemory, MemID)
    #path=ueye.c_wchar_p()
    #print(type(path))
    #nRet = ueye.is_ParameterSet(hCam, ueye.IS_PARAMETERSET_CMD_LOAD_FILE, path, 0)

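# The loop above allocates four image memories and adds them to the driver's capture
# sequence, so the camera cycles through a small ring buffer while running in free run
# mode.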
# Activates the camera's live video mode (free run mode)
nRet = ueye.is_CaptureVideo(hCam, ueye.IS_DONT_WAIT)
if nRet != ueye.IS_SUCCESS:
    print("is_CaptureVideo ERROR")

nRet = ueye.is_InquireImageMem(hCam, pcImageMemory, MemID, width, height, nBitsPerPixel, pitch)
if nRet != ueye.IS_SUCCESS:
    print("is_InquireImageMem ERROR")
else:
    print("Press q to leave the program")

Count = 0
def cut_rectangle():
    image_size = 150
    # img = cv2.imread("D:\\fatwolf\\company_files\\opencv\\2021-05-05-11_13_47.png")
    # img = cv2.imread("D:\\fatwolf\\company_files\\opencv\\2.png")
    # img = cv2.imread("C:\\Users\\User\\Desktop\\tfcoffebean\\test\\1.png")
    # img = cv2.imread("C:\\Users\\User\\Desktop\\IDS\\p\\12033_248.png")
    # img_size = img.shape
    # print(img_size)
    img = cv2.imread('D:\\fatwolf\\company_files\\python_code\\test_code\\test_pic\\12033_267.png')
    # img = cv2.resize(img1, (968, 548))
    point_color = (0, 0, 255)
    command1 = "SELECT Name, X, X1, Y, Y1 FROM `cut` WHERE Name LIKE 'roi1'"
    l = conn.cursor()
    l.execute(command1)
    conn.commit()
    r1 = l.fetchone()
    # print(r1[0])
    count = 1

    def roi1():
        # x = r1[1]
        # x1 = r1[2]
        # y = r1[3]
        # y1 = r1[4]
        # x = 743
        # x1 = 892
        # y = 17
        # y1 = 164
        x = 1257
        x1 = 1355
        y = 185
        y1 = 278
        i = 1
        i1 = 1
        i2 = 1
        i3 = 1
        i4 = 1
        i5 = 1
        i6 = 1
        number = count
        for i in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\python_code\\test_code\\test_pic\\pic' + '00_' + str(number) + '.png', roi)
            number = number + 1
            y = y + 150
            y1 = y1 + 150
            x = x + 145
            x1 = x1 + 145
        y = 1355
        y1 = 278
        '''
        for i in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 1257
        x1 = 1355
        '''
        '''
        for i1 in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 743
        x1 = 892
        for i2 in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 743
        x1 = 892
        for i3 in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 743
        x1 = 892
        for i4 in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 743
        x1 = 892
        for i5 in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 743
        x1 = 892
        for i6 in range(6):
            roi = img[y:y1, x:x1]
            cv2.rectangle(img, (x, y), (x1, y1), point_color, 1)
            roi = cv2.resize(roi, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            cv2.imwrite('D:\\fatwolf\\company_files\\paper_coffee\\pic\\' + '00_' + str(number) + '.png', roi)
            number = number + 1
            x = x + 150
            x1 = x1 + 150
            y = y + 145
            y1 = y1 + 145
        x = 743
        x1 = 892
        '''
    start = datetime.datetime.now()
    roi1()
    end = datetime.datetime.now()
    print("cut_rectangle Run Time:", end - start)
def cnn():
    # data folder
    data_dir = "D:\\fatwolf\\company_files\\python_code\\test_code\\test_pic\\pic"
    print(data_dir)
    allName = os.listdir(data_dir)
    # train or test
    train = False
    # model path
    model_path = "model/image_model"
    allTestDataName = []

    def read_data(data_dir):
        datas = []
        labels = []
        fpaths = []
        for filename in os.listdir(data_dir):
            fpath = os.path.join(data_dir, filename)
            allTestDataName.append(filename)
            image = Image.open(fpath)
            data = np.array(image) / 255.0
            label = int(filename.split("_")[0])
            datas.append(data)
            labels.append(label)
            # allTestDataName.append(filename)
        datas = np.array(datas)
        labels = np.array(labels)
        allTestDataName.sort(key=lambda x: int(x[:-4]))
        # print(allTestDataName)
        # print("shape of datas: {}\tshape of labels: {}".format(datas.shape, labels.shape))
        return allTestDataName, datas, labels

    allTestDataName, datas, labels = read_data(data_dir)
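    # read_data() derives each label from the number in front of the first "_" in the
    # file name (e.g. "00_3.png" -> label 0) and sorts the file list by the file name
    # without its extension parsed as an integer, so predictions are reported in crop
    # order.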
    # num_classes = len(set(labels))
    num_classes = 4
    datas_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 150, 150, 3])
    labels_placeholder = tf.compat.v1.placeholder(tf.int32, [None])
    dropout_placeholdr = tf.compat.v1.placeholder(tf.float32)
    conv0 = tf.layers.conv2d(datas_placeholder, 20, 5, activation=tf.nn.relu)
    pool0 = tf.layers.max_pooling2d(conv0, [2, 2], [2, 2])
    conv1 = tf.layers.conv2d(pool0, 40, 4, activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv1, [2, 2], [2, 2])
    flatten = tf.layers.flatten(pool1)
    fc = tf.layers.dense(flatten, 400, activation=tf.nn.relu)
    dropout_fc = tf.layers.dropout(fc, dropout_placeholdr)
    logits = tf.layers.dense(dropout_fc, num_classes)
    predicted_labels = tf.arg_max(logits, 1)
    losses = tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.one_hot(labels_placeholder, num_classes),
        logits=logits
    )
    mean_loss = tf.reduce_mean(losses)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2).minimize(losses)
    saver = tf.compat.v1.train.Saver()
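    # Network summary: two conv/max-pool stages (20 filters of 5x5, then 40 filters of
    # 4x4), flattened into a 400-unit fully connected layer with dropout, followed by a
    # linear layer over the 4 bean classes; predicted_labels takes the argmax over the
    # logits.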
    with tf.compat.v1.Session() as sess:
        if train:
            print("train mode")
            sess.run(tf.global_variables_initializer())
            train_feed_dict = {
                datas_placeholder: datas,
                labels_placeholder: labels,
                dropout_placeholdr: 0.25
            }
            for step in range(500):
                _, mean_loss_val = sess.run([optimizer, mean_loss], feed_dict=train_feed_dict)
                if step % 50 == 0:
                    print("step = {}\tmean loss = {}".format(step, mean_loss_val))
            saver.save(sess, model_path)
            print("train done, model saved to {}".format(model_path))
        else:
            # print("reloading model")
            saver.restore(sess, model_path)
            # print("{} reload model".format(model_path))
            label_name_dict = {
                0: "Brokenbeans",
                1: "Peaberry",
                2: "shellbean",
                3: "Worms"
            }
            test_feed_dict = {
                datas_placeholder: datas,
                labels_placeholder: labels,
                dropout_placeholdr: 0
            }
            predicted_labels_val = sess.run(predicted_labels, feed_dict=test_feed_dict)
            for fpath, real_label, predicted_label in zip(allTestDataName, labels, predicted_labels_val):
                real_label_name = label_name_dict[real_label]
                # print("before training", real_label_name)
                predicted_label_name = label_name_dict[predicted_label]
                # print("after training", predicted_label_name)
                # print("{}\t => {}".format(fpath, predicted_label_name))
                fpath = os.path.basename(fpath)
                print(f"{fpath}\t => {predicted_label_name}")
                path1 = 'output.txt'
                f = open(path1, 'a+')
                f.write(f"{fpath} => {predicted_label_name}\n")
                f.close()
                # Trigger the nozzle only for defect classes (the original comparison was
                # always true because of the chained "or" on bare strings)
                if predicted_label_name in ("shellbean", "Peaberry", "Worms"):
                    print("Nozzle triggered")
                else:
                    print("Not triggered")
                '''
                sqlStuff = "INSERT INTO result(picname,identify)" " VALUES (%s,%s)"
                data = [(fpath, predicted_label_name)]
                a = conn.cursor()
                a.executemany(sqlStuff, data)
                conn.commit()
                try:
                    connection = mysql.connector.connect(
                        host='127.0.0.1',               # host name
                        database='coffee_detection',    # database name
                        user='root',                    # user name
                        password='g53743001')           # password
                    # insert a new row
                    sql = "INSERT INTO result(picname,identify)" " VALUES (%s,%s)"
                    new_data = ("test", "test2")
                    cursor = connection.cursor()
                    cursor.execute(sql, new_data)
                    # make sure the data is committed to the database
                    connection.commit()
                except Error as e:
                    print("Database connection failed:", e)
                finally:
                    if (connection.is_connected()):
                        cursor.close()
                        connection.close()
                '''
    dirListing = os.listdir(data_dir)
    # print(len(dirListing))
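
# Main acquisition loop: grab the latest frame from the ring buffer, save it as a PNG,
# cut it into fixed ROIs with cut_rectangle(), classify the crops with cnn(), and print
# the timing of each stage. The loop checks cv2.waitKey for 'q' to exit.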
while nRet == ueye.IS_SUCCESS:
    start = datetime.datetime.now()
    ueye.is_GetActSeqBuf(hCam, nNum, pcMem, pcMemLast)
    array = ueye.get_data(pcMemLast, width, height, nBitsPerPixel, pitch, copy=False)
    bytes_per_pixel = int(nBitsPerPixel / 8)
    # ...reshape it into a numpy array...
    frame = np.reshape(array, (height.value, width.value, bytes_per_pixel))
    # ...resize the image by a half
    frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
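    # With the 32-bit BGRA color mode set above, bytes_per_pixel evaluates to 4, so the
    # flat buffer from get_data() is reshaped into a (height, width, 4) image before
    # being scaled down by half.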
    #---------------------------------------------------------------------------------------------------------------------------------------
    # Include image data processing here
    Count = Count + 1
    '''
    if cv2.waitKey(2) & 0xFF == ord('p'):
        Count = Count + 1
        ImageFileParams.pwchFileName = ueye.c_wchar_p('C:\\Users\\User\\Desktop\\test\\1.png')
        # ImageFileParams.pnImageID = &nID;
        # ImageFileParams.ppcImageMem = &pcMemory;
        ImageFileParams.nFileType = ueye.IS_IMG_PNG
        # ImageFileParams.ppcImageMem = pcMemLast;
        ImageFileParams.nQuality = 75
        nRet = ueye.is_ImageFile(hCam, ueye.IS_IMAGE_FILE_CMD_SAVE, ImageFileParams, ueye.sizeof(ImageFileParams))
        print(Count)
        print(nRet)
    '''
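    # The block below saves the current frame to a fixed PNG path via is_ImageFile();
    # note that cut_rectangle() afterwards reads its own hard-coded test image rather
    # than this freshly saved capture.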
    #---------------------------------------------------------------------------------------------------------------------------------------
    # ...and finally display it
    start3 = datetime.datetime.now()
    #cv2.imshow("SimpleLive_Python_uEye_OpenCV", frame)
    time.sleep(0.3)
    ImageFileParams.pwchFileName = ueye.c_wchar_p('C:\\Users\\User\\Desktop\\tfcoffebean\\test\\1.png')
    ImageFileParams.nFileType = ueye.IS_IMG_PNG
    ImageFileParams.nQuality = 75
    nRet = ueye.is_ImageFile(hCam, ueye.IS_IMAGE_FILE_CMD_SAVE, ImageFileParams, ueye.sizeof(ImageFileParams))
    print(Count)
    print(nRet)
    end3 = datetime.datetime.now()
    print("Image capture time:", end3 - start3)
    cut_rectangle()
    start2 = datetime.datetime.now()
    cnn()
    end2 = datetime.datetime.now()
    print("Recognition time:", end2 - start2)
    end = datetime.datetime.now()
    print("Total loop time:", end - start)
    print('-----------------------------------------------------------')
    tf.reset_default_graph()
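    # cnn() builds a fresh graph on every call, so the default graph is reset here to
    # avoid accumulating duplicate variables across loop iterations.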
    # Press q if you want to end the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
#---------------------------------------------------------------------------------------------------------------------------------------
# Releases an image memory that was allocated using is_AllocImageMem() and removes it from the driver management
ueye.is_FreeImageMem(hCam, pcImageMemory, MemID)

# Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
ueye.is_ExitCamera(hCam)

# Destroys the OpenCV windows
cv2.destroyAllWindows()

#print()
print("END")