commit 8b9636305c
cubernetes 2023-08-11 16:38:00 +02:00
13 changed files with 1631 additions and 0 deletions

BIN
assets/img/42.png Executable file

Binary file not shown.

Size: 58 KiB

File diff suppressed because it is too large.

BIN
assets/sound/background_music.mp3 Executable file

Binary file not shown.

BIN
assets/sound/collect.mp3 Executable file

Binary file not shown.

BIN
assets/sound/lost.mp3 Executable file

Binary file not shown.

BIN
assets/sound/rick.mp3 Executable file

Binary file not shown.

BIN
assets/sound/start.mp3 Executable file

Binary file not shown.

BIN
assets/video/rick.mp4 Executable file

Binary file not shown.

BIN
assets/video/rick2.mp4 Executable file

Binary file not shown.

5
bootstrap.cmd Executable file

@@ -0,0 +1,5 @@
python3 -m venv env
call .\env\Scripts\activate.bat
python3 -m pip install -r requirements.txt
echo Ready
pause
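
For reference, a cross-platform bootstrap could use the stdlib venv module directly. This is a hypothetical sketch, not part of the commit, and it assumes requirements.txt sits in the working directory:

import os
import subprocess
import venv

venv.create("env", with_pip=True)  # equivalent of `python3 -m venv env`
bin_dir = "Scripts" if os.name == "nt" else "bin"  # Windows venvs use Scripts\, POSIX venvs use bin/
pip_path = os.path.join("env", bin_dir, "pip")
subprocess.run([pip_path, "install", "-r", "requirements.txt"], check=True)
print("Ready")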

210
game.py Executable file

@@ -0,0 +1,210 @@
#!/usr/bin/env python3
import sys
import random
import threading
from time import sleep
import numpy as np
import cv2
import requests
from utils import *  # brings in mp, mp_hands, FingerType, and the sfx helpers


def get_42_img(
    img_path: str,
    margin_top: int,
    margin_bottom: int,
    margin_left: int,
    margin_right: int,
) -> np.ndarray:
    # Read grayscale, convert to BGR, crop the margins, pad with a black
    # border, and scale to the fixed side length used for hit detection.
    img: np.ndarray = cv2.imread(img_path, 0)
    if len(img.shape) in [1, 2]:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    img_height, img_width = img.shape[:2]
    img = img[
        margin_top:img_height - margin_bottom,
        margin_left:img_width - margin_right,
    ]
    b_top, b_bottom, b_left, b_right = [10] * 4
    img = cv2.copyMakeBorder(img, b_top, b_bottom, b_left, b_right, cv2.BORDER_CONSTANT, value=(0, 0, 0))
    img = cv2.resize(img, (img42_side_len, img42_side_len))
    return img


img42_side_len: int = 100
img42: np.ndarray = get_42_img(
    "./assets/img/42.png",
    margin_top=100 + 20,
    margin_bottom=100 + 20,
    margin_left=100,
    margin_right=100,
)


def touches_42(x: int, y: int, img42_x: int, img42_y: int) -> bool:
    return (
        img42_x <= x <= img42_x + img42_side_len
        and img42_y <= y <= img42_y + img42_side_len
    )


def add_directional_triangle(
    frame: np.ndarray,
    x1: int,
    y1: int,
    x2: int,
    y2: int,
    rgb: tuple[int, int, int],
    side_len: float,
    stretch: float,
) -> tuple[int, int]:
    # Unit vector pointing from (x2, y2) towards (x1, y1).
    dir_vector: np.ndarray = np.array([x1 - x2, y1 - y2]).astype(np.float64)
    norm = np.linalg.norm(dir_vector)
    dir_vector /= (norm or 1)  # guard against a zero-length vector
    side_len *= norm / 15  # scale the triangle with the finger segment length
    # stretch /= (norm/30)
    triangle_height: float = side_len * (3 ** 0.5) / 2
    half_base: float = side_len / 2
    perp_vector: np.ndarray = np.array([-dir_vector[1], dir_vector[0]])
    apex_vertex = (
        int(x1 + dir_vector[0] * triangle_height * 2 / 3 * stretch),
        int(y1 + dir_vector[1] * triangle_height * 2 / 3 * stretch),
    )
    left_vertex = (
        int(x1 - perp_vector[0] * half_base - dir_vector[0] * triangle_height / 3),
        int(y1 - perp_vector[1] * half_base - dir_vector[1] * triangle_height / 3),
    )
    right_vertex = (
        int(x1 + perp_vector[0] * half_base - dir_vector[0] * triangle_height / 3),
        int(y1 + perp_vector[1] * half_base - dir_vector[1] * triangle_height / 3),
    )
    triangle: np.ndarray = np.array([apex_vertex, left_vertex, right_vertex])
    cv2.drawContours(frame, [triangle], 0, rgb, -1)
    return apex_vertex


def show_frame(frame: np.ndarray, to_stdout: bool = False) -> None:
    if to_stdout:
        sys.stdout.buffer.write(frame.tobytes())
    else:
        cv2.namedWindow("Image", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("Image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow("Image", frame)
        cv2.waitKey(1)


def collect_vfx() -> None:
    requests.post('http://10.11.250.225:8080/api/v1/composition/layers/2/clips/5/connect')
    sleep(1)
    requests.post('http://10.11.250.225:8080/api/v1/composition/layers/2/clips/7/connect')


def die_vfx() -> None:
    requests.post('http://10.11.250.225:8080/api/v1/composition/layers/2/clips/6/connect')
    sleep(3)
    requests.post('http://10.11.250.225:8080/api/v1/composition/layers/2/clips/7/connect')


def green() -> None:
    threading.Thread(target=collect_vfx).start()


def die() -> None:
    threading.Thread(target=die_vfx).start()


def main() -> int:
    music = start_game_sfx()
    capture: cv2.VideoCapture = cv2.VideoCapture(0)
    hands: mp.solutions.hands.Hands = mp_hands.Hands(max_num_hands=3)
    collected_42: bool = True
    noise_42img: int = 5
    img42_x: int = -img42_side_len - 1 - noise_42img
    img42_y: int = -img42_side_len - 1 - noise_42img
    no_fingers: int = 0
    score: int = 0
    finger_x: int = -1
    finger_y: int = -1
    no_collect_ratio = 0
    no_finger_ratio = 0
    timer = 200
    i: int = 0
    while True:
        success: bool
        frame: np.ndarray
        success, frame = capture.read()
        if not success:
            continue
        frame = cv2.flip(frame, 1)
        # Fade towards white as the player runs out of time or leaves the view.
        ratio = max(no_finger_ratio, no_collect_ratio)
        frame = cv2.addWeighted(frame, 1 - ratio, np.ones(frame.shape, dtype=frame.dtype), ratio, 0)
        if i > 30:
            if collected_42:
                collected_42 = False
                frame_height, frame_width = frame.shape[:2]
                # Respawn the 42 logo at least 200px away from the finger.
                img42_x = random.randint(0, frame_width - img42_side_len - 1 - noise_42img)
                img42_y = random.randint(0, frame_height - img42_side_len - 1 - noise_42img)
                while ((finger_x - img42_x) ** 2 + (finger_y - img42_y) ** 2) ** .5 < 200:
                    img42_x = random.randint(0, frame_width - img42_side_len - 1 - noise_42img)
                    img42_y = random.randint(0, frame_height - img42_side_len - 1 - noise_42img)
            rand_noise_y = random.randint(0, noise_42img)
            rand_noise_x = random.randint(0, noise_42img)
            frame[
                img42_y + rand_noise_y : img42_y + img42_side_len + rand_noise_y,
                img42_x + rand_noise_x : img42_x + img42_side_len + rand_noise_x,
            ] = img42
        no_collect_ratio = min(i, timer) / timer
        finger_positions = list(get_finger_positions(frame, hands, add_landmarks=True))
        if finger_positions == []:
            no_fingers += 1
        else:
            no_fingers = 0
        no_finger_ratio = min(no_fingers, 255) / 255
        if ratio > 0.99:
            if music:
                music.kill()
            lost_sfx()
            die()
            return score
        for positions in finger_positions:
            index_knuckle_1_pos: tuple[int, int] = (-1, -1)
            for finger_id, finger_x, finger_y in positions:
                if finger_id == FingerType.INDEX_KNUCKLE_2:
                    index_knuckle_1_pos = (finger_x, finger_y)
                elif finger_id == FingerType.INDEX_TIP and index_knuckle_1_pos != (-1, -1):
                    apex_x, apex_y = add_directional_triangle(
                        frame,
                        finger_x,
                        finger_y,
                        *index_knuckle_1_pos,
                        (0, 0, 0),
                        side_len=70,
                        stretch=2.0,
                    )
                    if not collected_42 and (
                        touches_42(apex_x, apex_y, img42_x, img42_y)
                        or touches_42(finger_x, finger_y, img42_x, img42_y)
                    ):
                        collected_42 = True
                        i = 0
                        score += 42
                        if score == 4200 / 4:  # that's 25 collects
                            initiate_rick()
                        # Shrink the collect window towards 60 frames.
                        timer = 60 + (timer - 60) * .9
                        collect_sfx()
                        green()
        show_frame(frame, to_stdout=(not sys.stdout.isatty()))
        i += 1


if __name__ == '__main__':
    save_score(main())
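
For reference, a minimal smoke test of the geometry helpers. This is a hypothetical sketch, not part of the commit, and it assumes ./assets/img/42.png exists, since importing game runs its module-level image setup:

import numpy as np
from game import add_directional_triangle, touches_42

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # blank stand-in for a webcam frame
# Fingertip at (320, 240), knuckle at (320, 255): the triangle points straight up.
apex = add_directional_triangle(frame, 320, 240, 320, 255, (0, 255, 0), side_len=70, stretch=2.0)
print("apex:", apex)  # roughly (320, 159)
print("hit:", touches_42(*apex, 300, 100))  # True: the apex falls inside a logo at (300, 100)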

24
requirements.txt Executable file

@@ -0,0 +1,24 @@
absl-py==1.4.0
attrs==23.1.0
cffi==1.15.1
contourpy==1.1.0
cycler==0.11.0
flatbuffers==23.5.26
fonttools==4.42.0
kiwisolver==1.4.4
matplotlib==3.7.2
mediapipe==0.10.3
numpy==1.25.2
opencv-contrib-python==4.8.0.76
opencv-python==4.8.0.76
packaging==23.1
Pillow==10.0.0
playsound==1.3.0
protobuf==3.20.3
pycairo==1.24.0
pycparser==2.21
# PyGObject==3.44.1
pyparsing==3.0.9
python-dateutil==2.8.2
requests  # used by game.py; no version pinned in this commit
six==1.16.0
sounddevice==0.4.6

118
utils.py Executable file

@@ -0,0 +1,118 @@
import sys
import time
from enum import Enum
from subprocess import Popen
from typing import Generator
from types import ModuleType
import threading

# import sounddevice as sd
# import soundfile as sf
import numpy as np
import mediapipe as mp
import cv2
from playsound import playsound

mp_hands = mp.solutions.hands
mp_draw: ModuleType = mp.solutions.drawing_utils


class FingerType(Enum):
    # MediaPipe hand-landmark indices (0-20).
    BASE = 0
    BASE_RIGHT = 1
    THUMB_BASE = 2
    THUMB_KNUCKLE_1 = 3
    THUMB_TIP = 4
    INDEX_BASE = 5
    INDEX_KNUCKLE_1 = 6
    INDEX_KNUCKLE_2 = 7
    INDEX_TIP = 8
    MIDDLE_BASE = 9
    MIDDLE_KNUCKLE_1 = 10
    MIDDLE_KNUCKLE_2 = 11
    MIDDLE_TIP = 12
    RING_BASE = 13
    RING_KNUCKLE_1 = 14
    RING_KNUCKLE_2 = 15
    RING_TIP = 16
    PINKY_BASE = 17
    PINKY_KNUCKLE_1 = 18
    PINKY_KNUCKLE_2 = 19
    PINKY_TIP = 20


def save_score(score: int) -> None:
    with open('./.score', 'w') as score_file:
        score_file.write(str(score))


def play_audio(file_name: str) -> None:
    # Requires the sounddevice/soundfile imports above to be uncommented.
    data, samplerate = sf.read(file_name)
    sd.play(data, samplerate)
    sd.wait()  # wait for the audio playback to complete


def start_game_sfx() -> None:
    # play_audio('./assets/sound/start.mp3')
    playsound('./assets/sound/start.mp3')
    time.sleep(.5)
    # playsound('./assets/sound/background_music.mp3')
    # threading.Thread(target=playsound, args=('./assets/sound/background_music.mp3',), daemon=True).start()


def collect_sfx() -> None:
    pass
    # threading.Thread(target=play_audio, args=('./assets/sound/collect.mp3',), daemon=True).start()


def lost_sfx() -> None:
    playsound('./assets/sound/lost.mp3')


def show_matrix() -> None:
    Popen(['tmatrix'])


def initiate_rick() -> None:
    # threading.Thread(target=play_audio, args=('./assets/sound/rick.mp3',), daemon=True).start()
    # Stream the video's raw frames to stdout, pacing writes to the source FPS.
    cap = cv2.VideoCapture('./assets/video/rick2.mp4')
    fps: int = int(cap.get(cv2.CAP_PROP_FPS))
    desired_delay: float = 1 / (fps or 30)  # fall back to 30 FPS if the file reports none
    while True:
        start_time = time.time()
        ret, frame = cap.read()
        if not ret:
            break
        sys.stdout.buffer.write(frame.tobytes())
        elapsed_time = time.time() - start_time
        remaining_delay = max(desired_delay - elapsed_time, 0)
        time.sleep(remaining_delay)
    cap.release()


def found_hands() -> bool:
    capture: cv2.VideoCapture = cv2.VideoCapture(0)
    hands = mp_hands.Hands(max_num_hands=1)
    success, frame = capture.read()
    capture.release()
    if not success:
        return False
    return list(get_finger_positions(frame, hands)) != []


def get_finger_positions(
    frame: np.ndarray,
    hands: mp.solutions.hands.Hands,
    add_landmarks: bool = False,
) -> Generator[list[tuple[FingerType, int, int]], None, None]:
    height, width = frame.shape[:2]
    img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = hands.process(img_rgb)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            positions = []
            for idx, lm in enumerate(hand_landmarks.landmark):
                # Landmark coordinates are normalized; scale them to pixels.
                x = int(lm.x * width)
                y = int(lm.y * height)
                positions.append((FingerType(idx), x, y))
            yield positions
            if add_landmarks:
                mp_draw.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
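
A sketch of how these helpers compose into a launcher. This is hypothetical usage, not part of the commit, assuming game.py is run from the repo root and writes ./.score via save_score():

import subprocess
import time
from utils import found_hands

while not found_hands():  # poll the webcam until a hand shows up
    time.sleep(0.5)
subprocess.run(['python3', 'game.py'], check=True)
with open('./.score') as score_file:
    print('final score:', score_file.read())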