I am trying to display my Raspberry Pi camera feed in a label inside a tkinter frame. I have a method that recognizes faces from the loaded video capture, but the camera feed is not shown in the label.
Here is the code where I create the GUI:
def create_ui(self):
    self.window = tk.Tk()
    self.window.title("App Faces")
    self.window.geometry('860x720')
    self.window.bind('<Escape>', lambda e: self.window.quit())

    self.tab_control = ttk.Notebook(self.window)
    self.tab2 = ttk.Frame(self.tab_control)
    self.tab_control.add(self.tab2, text="Real Time Face Recognition")

    self.label2 = ttk.Label(self.tab2, text="Real Time Face Recognition")
    self.label2.place(x=12, y=50)
    self.lmain2 = ttk.Label(self.tab2)
    self.lmain2.place(x=12, y=70)

    self.boton2 = ttk.Button(self.tab2, text="START", command=self.recognizing_faces_video)
    self.boton2.pack()
    self.boton2.place(x=50, y=20)

    self.tab_control.pack(expand=1, fill='both')
    self.window.mainloop()
The button calls the method self.recognizing_faces_video, but it does not work:
def recognizing_faces_video(self):
    self.recognizer = pickle.loads(open(self.recognizer, "rb").read())
    self.le = pickle.loads(open(self.labelEncoder, "rb").read())

    self.cap = cv2.VideoCapture("http://192.168.1.71:8000/stream.mjpg")
    # print(self.cap)
    # cap = cv2.VideoCapture("http://raspberrypi.mshome.net:8000/stream.mjpg")
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.width)
    print(self.cap.isOpened())

    # grab the frame from the threaded video stream
    _, frame = self.cap.read()
    # cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)

    # resize the frame to have a width of 600 pixels (while
    # maintaining the aspect ratio), and then grab the image
    # dimensions
    frame = imutils.resize(frame, width=600)
    (h, w) = frame.shape[:2]

    # construct a blob from the image
    imageBlob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300),
                                      (104.0, 177.0, 123.0), swapRB=False, crop=False)

    # apply OpenCV's deep learning-based face detector to localize
    # faces in the input image
    self.model.setInput(imageBlob)
    detections = self.model.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]

        # filter out weak detections
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for
            # the face
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # extract the face ROI
            face = frame[startY:endY, startX:endX]
            (fH, fW) = face.shape[:2]

            # ensure the face width and height are sufficiently large
            if fW < 20 or fH < 20:
                continue

            # construct a blob for the face ROI, then pass the blob
            # through our face embedding model to obtain the 128-d
            # quantification of the face
            faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
                                             (0, 0, 0), swapRB=True, crop=False)
            self.embedder.setInput(faceBlob)
            vec = self.embedder.forward()

            # perform classification to recognize the face
            preds = self.recognizer.predict_proba(vec)[0]
            j = np.argmax(preds)
            proba = preds[j]
            name = self.le.classes_[j]

            # draw the bounding box of the face along with the
            # associated probability
            text = "{}: {:.2f}%".format(name, proba * 100)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          (0, 0, 255), 2)
            cv2.putText(frame, text, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

    self.boton2['state'] = "disabled"
    img = Image.fromArray(frame)
    imgTk = ImageTk.PhotoImage(image=img)
    self.lmain2.imgtk = imgTk
    self.lmain2.configure(image=imgTk)
    self.lmain2.after(10, self.recognizing_faces_video)
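For reference, this is the general pattern I am trying to follow: read a frame, convert it to something PIL can wrap, push it into the Label, and reschedule with after(). The sketch below is a minimal stand-alone version of that pattern only, not my actual app; the camera index 0, the window, the label and the 10 ms interval are placeholder assumptions.

import cv2
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk

window = tk.Tk()
label = ttk.Label(window)
label.pack()

# placeholder source; an MJPEG URL string could be passed instead of 0
cap = cv2.VideoCapture(0)

def update_frame():
    ok, frame = cap.read()
    if ok:
        # OpenCV returns BGR arrays; PIL expects RGB, so convert before wrapping
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        imgtk = ImageTk.PhotoImage(image=Image.fromarray(rgb))
        label.imgtk = imgtk          # keep a reference so the image is not garbage-collected
        label.configure(image=imgtk)
    # reschedule the next grab on the tkinter event loop
    label.after(10, update_frame)

update_frame()
window.mainloop()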
Can anyone help me?