Bugfix virtual_cam and reflect spike-generation changes in the sPyNNaker class
chanokin committed Mar 30, 2016
1 parent 0700cb8 commit 6b733f5
Showing 2 changed files with 83 additions and 77 deletions.
126 changes: 64 additions & 62 deletions pydvs/external_dvs_emulator_device.py
@@ -1,6 +1,3 @@
from __future__ import print_function


import logging

spinn_version = "master"
@@ -51,8 +48,7 @@
from numpy import where, logical_and, argmax
from numpy import uint8, uint16, int16
from multiprocessing import Process, Queue
from time import time as get_time_s,\
sleep as sleep_s
import time
from operator import itemgetter
import pickle

@@ -206,7 +202,7 @@ def __init__(self, n_neurons, machine_time_step, timescale_factor,
self._scaled_width = 0
self._fps = fps
self._max_time_ms = 0
self._prev_time = 0
self._time_per_frame = 0.

self._time_per_spike_pack_ms = 0

@@ -248,7 +244,7 @@ def __init__(self, n_neurons, machine_time_step, timescale_factor,

AbstractProvidesOutgoingConstraints.__init__(self)

#~ print("number of neurons for webcam = %d"%self._n_neurons)
print "number of neurons for webcam = %d"%self._n_neurons

self._live_conn = SpynnakerLiveSpikesConnection(send_labels = [self._label, ],
local_port = self._local_port)
@@ -309,18 +305,17 @@ def run(self, label, sender):
spike_gen_proc = Process(target=self.process_frame, args=(img_queue, spike_queue))
spike_gen_proc.start()

grab_times = []
start_time = 0.
app_start_time = get_time_s()
app_curr_time = get_time_s()
app_start_time = time.time()
app_curr_time = time.time()
first_frame = True
prev_time = get_time_s()
max_frame_time = self._max_time_ms/1000.
wait_time = 0

frame_time = 0.
while self._running:

start_time = get_time_s()
start_time = time.time()
valid_frame = self.grab_frame()
grab_times.append(time.time() - start_time)

if not valid_frame:
self._running = False
@@ -336,20 +331,14 @@

img_queue.put(self._curr_frame)

app_curr_time = get_time_s()
wait_time = max_frame_time - (app_curr_time - prev_time)
if wait_time > 0:
#~ print "run frame => wait time = %3.6f"%wait_time
sleep_s(wait_time)

app_curr_time = get_time_s()

app_curr_time = time.time()
if app_curr_time - app_start_time > max_run_time_s:
self._running = False

prev_time = get_time_s()

print("sPyNNaker DVS emulator run time: %s s"%(app_curr_time - app_start_time))
self._running = False
frame_time = time.time() - start_time
if frame_time < self._time_per_frame:
time.sleep(self._time_per_frame - frame_time)

print "webcam runtime ", app_curr_time - app_start_time
img_queue.put(None)
spike_gen_proc.join()
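
Note: the loop above paces itself against the camera rate; each pass measures how long grabbing and queueing the frame took and sleeps off whatever remains of the 1/fps budget stored in self._time_per_frame. A minimal sketch of that pattern, with hypothetical names:

import time

def pace_frame(loop_start, time_per_frame):
    # Sleep off whatever is left of the per-frame budget (1.0 / fps).
    elapsed = time.time() - loop_start
    if elapsed < time_per_frame:
        time.sleep(time_per_frame - elapsed)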

@@ -358,65 +347,74 @@ def run(self, label, sender):

if self._video_source is not None:
self._video_source.release()

cv2.destroyAllWindows()
cv2.destroyAllWindows()


def process_frame(self, img_queue, spike_queue):

label = self._label
sender = self._sender
spikes_frame = self._spikes_frame
wait_time = self._max_time_ms
cv2.namedWindow (label)
spike_list = []
gen_times = []
compose_times = []
transform_times = []
ref_up_times = []
start_time = 0.
end_time = 0.
lists = None
prev_time = get_time_s()
max_frame_time = self._max_time_ms/1000.
wait_time = 0
curr_time = 0

while True:
image = img_queue.get()

if image is None or not self._running:
break

start_time = time.time()
self.generate_spikes(image)

gen_times.append(time.time()-start_time)

start_time = time.time()
self.update_reference()

ref_up_times.append(time.time()-start_time)

start_time = time.time()
lists = self.transform_spikes()
transform_times.append(time.time() - start_time)

spike_queue.put(lists)

if self._save_spikes is not None:
spike_list.append(lists)

start_time = time.time()
self.compose_output_frame()
compose_times.append(time.time()-start_time)


cv2.imshow (label, spikes_frame)

if cv2.waitKey(1) & 0xFF == ord('q'):#\
#or not sender.isAlive():
self._running = False
break

curr_time = get_time_s()
wait_time = max_frame_time - (curr_time - prev_time)
if wait_time > 0:
#~ print "process frame => wait time = %3.6f"%wait_time
sleep_s(wait_time)

prev_time = get_time_s()

#continue

print("gen times")
print(numpy.array(gen_times).mean())
print("update ref times")
print(numpy.array(ref_up_times).mean())
print("transform times")
print(numpy.array(transform_times).mean())
print("compose times")
print(numpy.array(compose_times).mean())

cv2.destroyAllWindows()

if self._save_spikes is not None:
#print spike_list
print("Saving generated spikes to %s"%(self._save_spikes))
print "attempting to save spike_list"
pickle.dump( spike_list, open(self._save_spikes, "wb") )
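
Note: the two queues follow the usual multiprocessing producer/consumer pattern; run() pushes None into img_queue as a sentinel and process_frame breaks out of its loop when it sees it. A self-contained sketch of that pattern, assuming the sentinel is the whole shutdown protocol:

from multiprocessing import Process, Queue

def worker(queue):
    while True:
        item = queue.get()
        if item is None:       # sentinel: the producer is done
            break
        # ... process the item here ...

if __name__ == "__main__":
    queue = Queue()
    proc = Process(target=worker, args=(queue,))
    proc.start()
    queue.put("frame")         # hypothetical work item
    queue.put(None)            # request shutdown
    proc.join()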


@@ -434,7 +432,6 @@ def send_spikes(self, spike_queue):


def acquire_device(self):

if isinstance(self._device_id, VirtualCam):
self._video_source = self._device_id
self._is_virtual_cam = True
@@ -456,37 +453,40 @@ def acquire_device(self):
self._fps = self._video_source.get(CV_CAP_PROP_FPS)

self._max_time_ms = int16((1./self._fps)*1000)
self._time_per_frame = 1./self._fps

self._time_per_spike_pack_ms = self.calculate_time_per_pack()




def grab_frame(self):
#~ start_time = time.time()
if self._is_virtual_cam:
valid_frame, self._curr_frame[:] = self._video_source.read(self._ref_frame)
return True

else:
#~ start_time = get_time_s()
if self._raw_frame is None or self._scale_changed:
valid_frame, self._raw_frame = self._video_source.read()
else:
valid_frame, self._raw_frame[:] = self._video_source.read()

#~ end_time = get_time_s()
#~ end_time = time.time()
#~ print("Time to capture frame = ", end_time - start_time)

if not valid_frame:
return False

#~ start_time = get_time_s()
#~ start_time = time.time()
if self._gray_frame is None or self._scale_changed:
self._gray_frame = convertColor(self._raw_frame, COLOR_BGR2GRAY).astype(int16)
else:
self._gray_frame[:] = convertColor(self._raw_frame, COLOR_BGR2GRAY)
#~ end_time = get_time_s()
#~ end_time = time.time()
#~ print("Time to convert to grayscale = ", end_time - start_time)

#~ start_time = get_time_s()
#~ start_time = time.time()
if self._get_sizes or self._scale_changed:
self._get_sizes = False
self._scale_changed = False
@@ -510,10 +510,10 @@ def grab_frame(self):
self._tmp_frame = numpy.zeros((self._out_res, self._img_scaled_width))


#~ end_time = get_time_s()
#~ end_time = time.time()
#~ print("Time to calculate sizes = ", end_time - start_time)

#~ start_time = get_time_s()
#~ start_time = time.time()
if self._scale_img:
self._tmp_frame[:] = cv2.resize(self._gray_frame, (self._img_scaled_width, self._out_res),
interpolation=CV_INTER_NN)
@@ -522,9 +522,10 @@
else:
self._curr_frame[:] = self._gray_frame[self._img_height_crop_u: self._img_height_crop_b,
self._img_width_crop_l: self._img_width_crop_r]
#~ end_time = get_time_s()
#~ end_time = time.time()
#~ print("Time to scale frame = ", end_time - start_time)


return True


@@ -544,11 +545,11 @@ def emit_spikes(self, sender, lists):
#from generate_spikes.pyx (cython)
if lists is not None:
for spike_pack in lists:
start_time = get_time_s()
start_time = time.time()
send_spikes(lbl, spike_pack, send_full_keys=False)
elapsed_time = get_time_s() - start_time
elapsed_time = time.time() - start_time
if elapsed_time < max_time_s:
sleep_s(max_time_s - elapsed_time)
time.sleep(max_time_s - elapsed_time)



@@ -651,6 +652,7 @@ def transform_spikes(self):
data_mask = self._data_mask
polarity = self._polarity_n
spikes = self._spikes
threshold = self._threshold
max_thresh = self._max_threshold
min_thresh = self._min_threshold
#~ lists = self._spikes_lists
@@ -665,7 +667,7 @@
#from generate_spikes.pyx (cython)
if self._output_type == ExternalDvsEmulatorDevice.OUTPUT_RATE:
lists = make_spike_lists_rate(up_spks, dn_spks,
g_max,
g_max, threshold,
up_down_shift, data_shift, data_mask,
max_time_ms)

@@ -675,14 +677,14 @@
up_down_shift, data_shift, data_mask,
num_bins,
max_time_ms,
min_thresh, max_thresh)
threshold, max_thresh)

elif self._output_type == ExternalDvsEmulatorDevice.OUTPUT_TIME_BIN:
lists = make_spike_lists_time_bin(up_spks, dn_spks,
g_max,
up_down_shift, data_shift, data_mask,
max_time_ms,
min_thresh, max_thresh,
threshold, max_thresh,
num_bins,
log2_table)

@@ -691,7 +693,7 @@
g_max,
up_down_shift, data_shift, data_mask,
max_time_ms,
min_thresh, max_thresh,
threshold, max_thresh,
num_bins,
log2_table)
return lists
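
Note: the up_spks/dn_spks arrays consumed by these packing routines come from thresholding the difference between the current frame and the reference frame; brightness increases past the threshold become positive (up) events and decreases become negative (down) events. A simplified NumPy sketch of that idea only; the real work is done by the cython routines from generate_spikes.pyx:

import numpy as np

def thresholded_difference(curr_frame, ref_frame, threshold):
    # Simplified illustration, not the cython implementation.
    diff = curr_frame.astype(np.int16) - ref_frame.astype(np.int16)
    up_spikes = np.where(diff > threshold, diff, 0)
    down_spikes = np.where(diff < -threshold, -diff, 0)
    return up_spikes, down_spikes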
34 changes: 19 additions & 15 deletions pydvs/virtual_cam.py
@@ -96,10 +96,6 @@ def __init__(self, image_location, behaviour="SACCADE", fps=90, resolution=128,

self.frame_prev_time = get_time()

self.lock = Lock()

#~ self.locking_thread = Thread(name="locking", target=self.frame_rate_constraint,
#~ args=(self.time_period,))
self.frame_number = 0

self.current_buffer = 0
@@ -174,7 +170,24 @@ def __del__(self):
def stop(self):
self.running = False

def isOpened(self):
return True

def get(self, prop):
if prop == CV_CAP_PROP_FRAME_WIDTH:
return self.width
elif prop == CV_CAP_PROP_FRAME_HEIGHT:
return self.height
elif prop == CV_CAP_PROP_FPS:
return self.fps
else:
return False

def set(self, prop):
return False

def release(self):
self.stop()

def load_images(self, buffer_number):
from_idx = self.buffer_start_idx
@@ -201,14 +214,10 @@ def read(self, ref):
showing_img = self.showing_img
move_image = self.move_image
fps = self.fps
lock = self.lock
num_images = self.total_images
image_buffer = self.image_buffer[self.current_buffer]
all_in_buffer = self.all_in_buffer

#~ while self.locked: #wait for frame rate constraint allows new image to be fetched
#~ pass

start = get_time()
run_time = start - self.on_off_start_time

@@ -217,7 +226,8 @@
if run_time >= inter_off_time:
self.showing_img = True
self.on_off_start_time = get_time()

self.frame_number = 0

self.current_image_idx += 1

if self.current_image_idx >= num_images:
@@ -251,12 +261,6 @@


self.prev_time = get_time()
#~ print("time per read = %f"%(get_time() - start))
#~ lock.acquire()
#~ try:
#~ self.locked = True
#~ finally:
#~ lock.release()

return True, self.current_image

