# https://www.hackster.io/brendan-lewis/detect-motion-with-opencv-no-pir-sensor-needed-bbeacf
from pyimagesearch.tempimage import TempImage
import dropbox as dbx
from picamera.array import PiRGBArray
from picamera import PiCamera
import warnings
import datetime
import imutils
import json
import time
import cv2

# filter warnings and initialize the Dropbox client
warnings.filterwarnings("ignore")

# put your Dropbox access token here:
client = dbx.Dropbox("YOUR_TOKEN_HERE")
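# NOTE: the access token is typically generated for a Dropbox app in the
# Dropbox App Console; uploads below land in that app's folder (or the
# Dropbox root, depending on the app's access type)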
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 16
rawCapture = PiRGBArray(camera, size=(640, 480))

# allow the camera to warm up, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(2.5)
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
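# avg holds the running background model, lastUploaded throttles how often
# images are sent to Dropbox, and motionCounter requires motion to persist
# across several consecutive frames before an upload is triggered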
# capture frames from the camera
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, initialize the
    # timestamp, and reset the motion text (avoids a NameError when no
    # motion is found in the frame)
    frame = f.array
    timestamp = datetime.datetime.now()
    text = ""

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue
    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and the running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on the thresholded image
    thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    # imutils.grab_contours handles the differing return signatures of
    # cv2.findContours across OpenCV versions
    cnts = imutils.grab_contours(cnts)
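    # NOTE: the binary threshold (5), dilation iterations (2), and the
    # minimum contour area used below (5000 px) control how sensitive the
    # detector is and may need tuning for the scene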
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < 5000:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "!"

    # draw the timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "{}".format(ts), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    # check to see if the room is occupied
    if text == "!":
        # check to see if enough time has passed between uploads
        if (timestamp - lastUploaded).seconds >= 3.0:
            # increment the motion counter
            motionCounter += 1

            # check to see if the number of frames with consistent motion
            # is high enough
            if motionCounter >= 8:
                # write the image to a temporary file
                t = TempImage()
                cv2.imwrite(t.path, frame)

                # upload the image to Dropbox (v2 API) and clean up the
                # temporary file
                print("[UPLOAD] {}".format(ts))
                path = "/{timestamp}.jpg".format(timestamp=ts)
                with open(t.path, "rb") as image:
                    client.files_upload(image.read(), path)
                t.cleanup()

                # update the last uploaded timestamp and reset the motion
                # counter
                lastUploaded = timestamp
                motionCounter = 0

    # otherwise, the room is not occupied
    else:
        motionCounter = 0

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)