-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathhandler.py
100 lines (85 loc) · 3.46 KB
/
handler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import csv
import io
import json
import os
import pickle
import subprocess
import urllib.parse

import boto3
from boto3 import client as boto3_client
from boto3.dynamodb.conditions import Key

import face_recognition
def findInDynamoTable(table, key):
    """Return all items in *table* whose 'name' attribute equals *key*.

    Performs a full table scan with a filter expression; each matched
    item is printed for CloudWatch visibility before the list is returned.
    """
    print("Searching DynamoDB table")
    response = table.scan(FilterExpression=Key('name').eq(key))
    matches = response['Items']
    for record in matches:
        print(record)
    return matches
def generate_encoding(filename):
    """Load and return the pickled object stored at *filename*.

    The loaded payload is printed before being returned.
    NOTE: pickle.load executes arbitrary code if the file is untrusted —
    only use on files bundled with the deployment.
    """
    with open(filename, "rb") as handle:
        payload = pickle.load(handle)
    print(payload)
    return payload
def pushToS3(data, key, bucket_name):
    """Write DynamoDB items to a CSV in /tmp and upload it to S3.

    Args:
        data: list of item dicts (as returned by findInDynamoTable);
            only the 'year', 'major' and 'name' attributes are written.
        key: the source object key; the CSV takes the same stem with
            a .csv extension.
        bucket_name: destination S3 bucket.

    Raises:
        Exception: re-raised after logging if CSV creation or upload fails.
    """
    print("Pushing data to S3")
    file_name = os.path.splitext(key)[0] + ".csv"
    local_path = os.path.join("/tmp", file_name)
    header = ["year", "major", "name"]
    try:
        with open(local_path, 'w', encoding='utf-8-sig', newline='') as f:
            # extrasaction='ignore' keeps DictWriter from raising when the
            # DynamoDB items carry attributes beyond the three columns.
            writer = csv.DictWriter(f, fieldnames=header, extrasaction='ignore')
            writer.writeheader()
            # Write every matched item; the original wrote only data[0],
            # dropping all other matches and raising IndexError on empty data.
            for item in data:
                writer.writerow({'year': item.get('year'),
                                 'major': item.get('major'),
                                 'name': item.get('name')})
        with open(local_path, "rb") as f:
            s3 = boto3.resource('s3')
            bucket = s3.Bucket(bucket_name)
            bucket.upload_fileobj(f, file_name)
    except Exception as e:
        print(e)
        print('Error in creating and uploading csv to bucket {}.'.format(bucket_name))
        raise e
def faceRecognitionFromVideo(video_path, data):
    """Extract frames from a /tmp video and match faces against known encodings.

    Args:
        video_path: filename of the video inside /tmp.
        data: dict with parallel lists under 'encoding' (face encodings)
            and 'name' (corresponding person names).

    Returns:
        None once the first face is matched (after looking the name up in
        DynamoDB and pushing the result CSV to S3), or the string
        "no_face_found" if no frame produced a match.
    """
    print("Recognizing face from video")
    path = "/tmp/"
    # Extract one frame per second. Using an argv list (shell=False) avoids
    # the quoting/injection hazards of the original os.system string build.
    subprocess.run(
        ["ffmpeg", "-i", os.path.join(path, video_path), "-r", "1",
         os.path.join(path, "image-%3d.jpeg")],
        check=False,
    )
    # Sort so frames are examined in chronological order; listdir order
    # is arbitrary.
    image_files = sorted(f for f in os.listdir(path) if f.endswith('.jpeg'))
    for image_file in image_files:
        frame = face_recognition.load_image_file(os.path.join(path, image_file))
        encodings = face_recognition.face_encodings(frame)
        if not encodings:
            # No face detected in this frame; try the next one.
            continue
        frame_encoding = encodings[0]
        for i, known_encoding in enumerate(data['encoding']):
            if face_recognition.compare_faces([frame_encoding], known_encoding)[0]:
                ans = data['name'][i]
                # NOTE(review): `table` and `output_bucket` are module globals
                # assigned only under __main__ — when this module is imported
                # as a Lambda handler they are undefined and this raises
                # NameError. They should be initialized at module level.
                searched_data = findInDynamoTable(table, ans)
                pushToS3(searched_data, video_path, output_bucket)
                return
    return "no_face_found"
def face_recognition_handler(event, context):
    """Lambda entry point for S3 object-created events.

    Downloads the uploaded video to /tmp, loads the pickled known-face
    encodings from the bundled 'encoding' file, and runs face recognition
    on the video.

    Args:
        event: S3 event notification payload; only Records[0] is processed.
        context: Lambda context (unused).

    Returns:
        Whatever faceRecognitionFromVideo returns (None on a match,
        "no_face_found" otherwise).
    """
    print("Handling face recognition event")
    print(event)
    record = event['Records'][0]['s3']
    bucket_name = record['bucket']['name']
    # S3 event keys are URL-encoded (spaces arrive as '+').
    key = urllib.parse.unquote_plus(record['object']['key'], encoding='utf-8')
    print("Bucket name: {}, Key: {}".format(bucket_name, key))
    # Keys may carry '/' prefixes; /tmp has no such subdirectories, so
    # download under the basename (the original used the raw key and
    # failed for prefixed objects).
    local_name = os.path.basename(key)
    s3 = boto3.client('s3')
    s3.download_file(bucket_name, key, "/tmp/" + local_name)
    data = generate_encoding('encoding')
    return faceRecognitionFromVideo(local_name, data)
if __name__ == "__main__":
    # Local/dev configuration. NOTE(review): hard-coded AWS credentials are a
    # security risk — prefer environment variables or an IAM role.
    input_bucket = ""
    # `output_bucket` is read as a module global by faceRecognitionFromVideo.
    output_bucket = ""
    aws_access_key_id = ""
    aws_secret_access_key = ""
    region_name = "us-east-1"
    # NOTE(review): s3_client and s3 below are created but never used in this file.
    s3_client = boto3.client('s3', aws_access_key_id= aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=region_name)
    s3 = boto3.resource(
        service_name='s3',
        region_name=region_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key
    )
    TABLE_NAME = ""
    # NOTE(review): DynamoDB uses us-east-2 while S3 uses us-east-1 — confirm
    # the cross-region setup is intended.
    dynamodb = boto3.resource('dynamodb', region_name="us-east-2", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
    # `table` is read as a module global by faceRecognitionFromVideo; it is
    # only defined when this file is run as a script, not when imported by
    # the Lambda runtime.
    table = dynamodb.Table(TABLE_NAME)