I ran the code below on a Khadas VIM3 running Ubuntu 20.04, but the UnboundLocalError is still not resolved.
import cv2
import numpy as np
import ksnn.api
import os
from ksnn.api import output_format
npu = ksnn.api.KSNN('VIM3')
model_path = "/home/khadas/Downloads/MoveNet_TFLite_NPU.nb"
if not os.path.exists(model_path):
    raise ValueError(f"path Error: {model_path}")
npu.nn_init(library='/home/khadas/libnn_MoveNet_TFLite_NPU.so', model=model_path, level=0)
KEYPOINT_DICT = {
    'nose': 0,
    'left_eye': 1,
    'right_eye': 2,
    'left_ear': 3,
    'right_ear': 4,
    'left_shoulder': 5,
    'right_shoulder': 6,
    'left_elbow': 7,
    'right_elbow': 8,
    'left_wrist': 9,
    'right_wrist': 10
}
KEYPOINT_EDGE_INDS_TO_COLOR = {
    (0, 1): (255, 0, 255),
    (0, 2): (0, 255, 255),
    (1, 3): (255, 0, 255),
    (2, 4): (0, 255, 255),
    (0, 5): (255, 0, 255),
    (0, 6): (0, 255, 255),
    (5, 7): (0, 0, 255),   # left arm (red)
    (7, 9): (0, 0, 255),   # left arm (red)
    (6, 8): (255, 0, 0),   # right arm (blue)
    (8, 10): (255, 0, 0),  # right arm (blue)
    (5, 6): (255, 255, 0)
}
def _keypoints_and_edges_for_display(keypoints_with_scores, height, width, keypoint_threshold=0.11):
    keypoints_all = []
    keypoint_edges_all = []
    edge_colors = []
    num_instances, _, _, _ = keypoints_with_scores.shape
    for idx in range(num_instances):
        # MoveNet output is (y, x, score) per keypoint.
        kpts_x = keypoints_with_scores[0, idx, :, 1]
        kpts_y = keypoints_with_scores[0, idx, :, 0]
        kpts_scores = keypoints_with_scores[0, idx, :, 2]
        # Keep only the first 11 keypoints (upper body) and scale to pixel coordinates.
        kpts_absolute_xy = np.stack([width * np.array(kpts_x[:11]), height * np.array(kpts_y[:11])], axis=-1)
        kpts_above_thresh_absolute = kpts_absolute_xy[kpts_scores[:11] > keypoint_threshold, :]
        keypoints_all.append(kpts_above_thresh_absolute)
        for edge_pair, color in KEYPOINT_EDGE_INDS_TO_COLOR.items():
            if kpts_scores[edge_pair[0]] > keypoint_threshold and kpts_scores[edge_pair[1]] > keypoint_threshold:
                x_start = kpts_absolute_xy[edge_pair[0], 0]
                y_start = kpts_absolute_xy[edge_pair[0], 1]
                x_end = kpts_absolute_xy[edge_pair[1], 0]
                y_end = kpts_absolute_xy[edge_pair[1], 1]
                line_seg = np.array([[x_start, y_start], [x_end, y_end]])
                keypoint_edges_all.append(line_seg)
                edge_colors.append(color)
    if keypoints_all:
        keypoints_xy = np.concatenate(keypoints_all, axis=0)
    else:
        keypoints_xy = np.zeros((0, 17, 2))
    if keypoint_edges_all:
        edges_xy = np.stack(keypoint_edges_all, axis=0)
    else:
        edges_xy = np.zeros((0, 2, 2))
    return keypoints_xy, edges_xy, edge_colors
def draw_prediction_on_image(image, keypoints_with_scores):
    height, width, _ = image.shape
    keypoints_xy, edges_xy, edge_colors = _keypoints_and_edges_for_display(keypoints_with_scores, height, width)
    for keypoint in keypoints_xy:
        cv2.circle(image, (int(keypoint[0]), int(keypoint[1])), 4, (0, 255, 0), -1)
    for edge, color in zip(edges_xy, edge_colors):
        p1 = tuple(edge[0].astype(int))
        p2 = tuple(edge[1].astype(int))
        cv2.line(image, p1, p2, color, 2)
    keypoints_dict = {
        k: (int(keypoints_xy[KEYPOINT_DICT[k]][0]), int(keypoints_xy[KEYPOINT_DICT[k]][1]))
        for k in KEYPOINT_DICT.keys()
        if len(keypoints_xy) > KEYPOINT_DICT[k]
    }
    if ('left_wrist' in keypoints_dict and 'left_shoulder' in keypoints_dict
            and keypoints_dict['left_wrist'][1] < keypoints_dict['left_shoulder'][1]):
        cv2.putText(image, "Raise Right Arm", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
    if ('right_wrist' in keypoints_dict and 'right_shoulder' in keypoints_dict
            and keypoints_dict['right_wrist'][1] < keypoints_dict['right_shoulder'][1]):
        cv2.putText(image, "Raise Left Arm", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    return image
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Flip horizontally (mirror view)
    frame = cv2.flip(frame, 1)
    input_image = cv2.resize(frame, (256, 256))
    input_image = input_image.astype(np.float32)
    input_image /= 255.0
    input_image = input_image.transpose(2, 0, 1)  # (H, W, C) -> (C, H, W)
    input_list = [input_image]
    output = npu.nn_inference(cv_img=input_list, platform='TFLITE', reorder='0 1 2', input_tensor=1, output_tensor=1, output_format=output_format.OUT_FORMAT_FLOAT32)
    keypoints_with_scores = np.array(output[0])
    output_overlay = draw_prediction_on_image(frame, keypoints_with_scores)
    cv2.imshow('MoveNet', output_overlay)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
khadas@Khadas:~/ksnn$ /bin/python3 /home/khadas/MoveNet_TFL_NPU_KSNN.py
Traceback (most recent call last):
  File "/home/khadas/MoveNet_TFL_NPU_KSNN.py", line 126, in <module>
    output = npu.nn_inference(cv_img=input_list, platform='TFLITE', reorder='0 1 2', input_tensor=1, output_tensor=1, output_format=output_format.OUT_FORMAT_FLOAT32)
  File "/home/khadas/.local/lib/python3.8/site-packages/ksnn/api.py", line 228, in nn_inference
    res = self.nn_set_inputs(cv_img, platform=platform, reorder=reorder, tensor=input_tensor)
  File "/home/khadas/.local/lib/python3.8/site-packages/ksnn/api.py", line 149, in nn_set_inputs
    del pixel_data
UnboundLocalError: local variable 'pixel_data' referenced before assignment
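To narrow this down, a stripped-down test along these lines should hit (or avoid) the same nn_set_inputs error without the camera loop and drawing code. This is only a sketch, not a confirmed fix: it assumes a plain uint8 BGR image is what the KSNN preprocessing expects (that is the format the stock Khadas KSNN samples pass in, without the /255.0 normalization and HWC->CHW transpose my script does), and /home/khadas/test.jpg is just a placeholder path for any test picture.

import cv2
import numpy as np
import ksnn.api
from ksnn.api import output_format

npu = ksnn.api.KSNN('VIM3')
npu.nn_init(library='/home/khadas/libnn_MoveNet_TFLite_NPU.so',
            model='/home/khadas/Downloads/MoveNet_TFLite_NPU.nb',
            level=0)

# Placeholder test image; any picture that can be resized to 256x256 will do.
frame = cv2.imread('/home/khadas/test.jpg', cv2.IMREAD_COLOR)
frame = cv2.resize(frame, (256, 256))

# Assumption: pass the raw uint8 BGR frame and let KSNN do the preprocessing,
# instead of the normalized float32 CHW array used in the script above.
output = npu.nn_inference(cv_img=[frame], platform='TFLITE', reorder='0 1 2',
                          input_tensor=1, output_tensor=1,
                          output_format=output_format.OUT_FORMAT_FLOAT32)
print(np.array(output[0]).shape)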