Add the latest OpenCV code for the PYNQ
This commit is contained in:
parent
4f813eb502
commit
cad3850a59
1 changed file with 175 additions and 0 deletions
175
pynq-opencv/main.py
Normal file
175
pynq-opencv/main.py
Normal file
|
@ -0,0 +1,175 @@
|
|||
import cv2
|
||||
import numpy as np
|
||||
from sklearn import cluster
|
||||
import os
|
||||
# Setting up the blob detector
params = cv2.SimpleBlobDetector_Params()

# Fix: the original line was the bare expression `params.filterByInertia`,
# which is a no-op attribute read. The flag must be assigned so that the
# minInertiaRatio threshold below is actually applied.
params.filterByInertia = True
# Keep only roughly-circular blobs (dice pips); 1.0 is a perfect circle.
params.minInertiaRatio = 0.6

detector = cv2.SimpleBlobDetector_create(params)
||||
def get_blobs(frame):
    """Run the module-level blob detector on *frame*; return its keypoints.

    Pre-processing (median blur + grayscale conversion) was tried and left
    disabled — the detector operates on the frame exactly as given.
    """
    return detector.detect(frame)
||||
def get_dice_from_blobs(blobs):
    """Group blob keypoints into dice by spatial clustering.

    Parameters
    ----------
    blobs : iterable of keypoint-like objects, each exposing ``.pt``
        (an (x, y) position, or None).

    Returns
    -------
    list
        One ``[dot_count, centroid_x, centroid_y]`` entry per detected die,
        or ``[]`` when no usable blobs were found.
    """
    # Collect every blob centroid, skipping blobs without a position.
    # (Fixed: the original compared with `pos != None`; identity check
    # with `is not None` is the correct idiom.)
    X = np.asarray([b.pt for b in blobs if b.pt is not None])

    if len(X) == 0:
        return []

    # Important to set min_samples to 1, as a die may show only one dot.
    # eps=150 px: dots closer than this are assumed to belong to one die.
    clustering = cluster.DBSCAN(eps=150, min_samples=1).fit(X)

    # DBSCAN labels are 0..k-1 here (min_samples=1 means no noise points),
    # so the largest label + 1 is the number of dice found.
    num_dice = max(clustering.labels_) + 1

    dice = []
    # A die's centroid is the mean position of its clustered dots.
    for i in range(num_dice):
        X_dice = X[clustering.labels_ == i]
        centroid_dice = np.mean(X_dice, axis=0)
        dice.append([len(X_dice), *centroid_dice])

    return dice
||||
def overlay_info(frame, dice, blobs):
    """Draw blob circles and per-die pip counts onto *frame*.

    Parameters
    ----------
    frame : image to draw on (modified in place).
    dice : list of ``[dot_count, centroid_x, centroid_y]`` entries.
    blobs : keypoints, each exposing ``.pt`` (x, y) and ``.size`` (diameter).

    Returns
    -------
    The same *frame*. (Fixed: the function previously returned None even
    though the caller assigns its result; now the mutated frame is returned
    explicitly.)
    """
    # Overlay blobs: blue circle around every detected dot.
    for b in blobs:
        pos = b.pt
        r = b.size / 2

        cv2.circle(frame, (int(pos[0]), int(pos[1])),
                   int(r), (255, 0, 0), 2)

    # Overlay dice number: green pip count centred on each die.
    for d in dice:
        # Get textsize for text centering
        textsize = cv2.getTextSize(
            str(d[0]), cv2.FONT_HERSHEY_PLAIN, 3, 2)[0]

        cv2.putText(frame, str(d[0]),
                    (int(d[1] - textsize[0] / 2),
                     int(d[2] + textsize[1] / 2)),
                    cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 2)

    return frame
||||
# standard edge detection filter
def edge_bb_dice(image):
    """Crop *image* down to the bounding box of its Canny edges.

    Parameters
    ----------
    image : BGR frame (numpy array) from the camera.

    Returns
    -------
    tuple
        ``(image, filtered, cropped)`` — the untouched input, the Canny
        edge map, and the grayscale crop around the detected edges (the
        whole grayscale frame when no edges were found).
    """
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Blur first so Canny responds to real edges rather than sensor noise.
    blurred = cv2.GaussianBlur(gray_img, (7, 7), 0)
    detected_edges = cv2.Canny(blurred, 150, 200)

    # Fallbacks used when the frame contains no edge pixels at all.
    cropped = gray_img
    filtered = detected_edges

    # (row, col) index of every edge pixel.
    nonzero_coords = np.column_stack(np.where(filtered > 0.0))

    if len(nonzero_coords) > 0:
        min_y, min_x = np.min(nonzero_coords, axis=0)
        max_y, max_x = np.max(nonzero_coords, axis=0)

        # +1 keeps the edge pixels on the max row/column inside the crop
        # (Python slicing excludes the stop index — the original dropped
        # that last row and column).
        cropped = gray_img[min_y:max_y + 1, min_x:max_x + 1]

    return image, filtered, cropped
||||
# Initialize a video feed
cap = cv2.VideoCapture(1)
# 16:9 (544x306 -> 30fps with edge_bb_dice)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 544)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 306)

last_dice = 0       # pip count seen on the previous frame
frame_counter = 0   # consecutive frames showing the same pip count
label_counter = 0   # running row id written to the CSV log
can_throw = True    # prevents logging the same settled roll twice

current_path = '../bossfight/assets/current_roll.jpg'
previous_path = '../bossfight/assets/previous_roll.jpg'
# NOTE(review): may be None if the file doesn't exist yet — the first
# imwrite of previous_roll would then fail; confirm the asset is seeded.
current_roll = cv2.imread(current_path)

while True:
    # Grab the latest image from the video feed
    ret, frame = cap.read()
    if not ret or frame is None:
        # Fix: the original never checked the read result, so a dropped
        # frame would crash inside cv2.cvtColor. Skip and try again.
        continue

    image, filtered, cropped = edge_bb_dice(frame)

    blobs = get_blobs(cropped)
    dice = get_dice_from_blobs(blobs)
    out_frame = overlay_info(cropped, dice, blobs)

    # cv2.imshow("frame", image)
    # cv2.imshow("frame2", filtered)
    # cv2.imshow("frame3", cropped)

    for d in dice:
        if last_dice == d[0]:
            frame_counter += 1
            # A roll counts once the same pip count has been stable for
            # more than 17 frames and we haven't logged this throw yet.
            if frame_counter > 17 and can_throw:
                frame_counter = 0

                cv2.imwrite(current_path, cropped)
                cv2.imwrite(previous_path, current_roll)
                current_roll = cropped

                label_counter += 1
                s = f'\n{label_counter},{d[0]}'
                with open('../bossfight/dice_roll_data.csv', 'a') as fd:
                    fd.write(s)
                print(s)
                can_throw = False
        else:
            frame_counter = 0
        last_dice = d[0]

    # An empty table re-arms the logger for the next throw.
    if not dice:
        can_throw = True
        last_dice = 0

    res = cv2.waitKey(1)

    # Stop if the user presses "q"
    if res & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
|
Loading…
Reference in a new issue