Working driver monitoring #5

Open: wants to merge 22 commits into base: working-sim
6 changes: 3 additions & 3 deletions selfdrive/manager/process_config.py
@@ -6,7 +6,7 @@
from selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess

WEBCAM = os.getenv("USE_WEBCAM") is not None

DM_SIM = os.getenv("DM_SIM") is not None
def driverview(started: bool, params: Params, CP: car.CarParams) -> bool:
return params.get_bool("IsDriverViewEnabled") # type: ignore

@@ -27,7 +27,7 @@ def logging(started, params, CP: car.CarParams) -> bool:
PythonProcess("timezoned", "system.timezoned", enabled=not PC, offroad=True),

DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"),
NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM), callback=driverview),
NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM) or DM_SIM, callback=driverview),
NativeProcess("encoderd", "selfdrive/loggerd", ["./encoderd"]),
NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"], onroad=False, callback=logging),
NativeProcess("modeld", "selfdrive/modeld", ["./modeld"]),
@@ -41,7 +41,7 @@ def logging(started, params, CP: car.CarParams) -> bool:
PythonProcess("torqued", "selfdrive.locationd.torqued"),
PythonProcess("controlsd", "selfdrive.controls.controlsd"),
PythonProcess("deleter", "selfdrive.loggerd.deleter", offroad=True),
PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM), callback=driverview),
PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM) or DM_SIM, callback=driverview),
PythonProcess("laikad", "selfdrive.locationd.laikad"),
PythonProcess("navd", "selfdrive.navd.navd"),
PythonProcess("pandad", "selfdrive.boardd.pandad", offroad=True),
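
For context, the new DM_SIM gate only checks that the variable exists; its value is never inspected. A minimal sketch of exercising it outside the interactive launcher (assuming the stock sim layout, with manager started via tools/sim/launch_openpilot.sh):

# Sketch: exporting DM_SIM before launch enables both driver-monitoring processes on PC
export DM_SIM=1
cd tools/sim && ./launch_openpilot.sh
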
115 changes: 100 additions & 15 deletions tools/sim/bridge.py
@@ -22,24 +22,33 @@
from common.realtime import DT_DMON, Ratekeeper
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
from tools.sim.cameragui.camera_gui import CameraWidget
from tools.sim.lib.can import can_function

from PyQt5 import QtWidgets

import cv2

W, H = 1928, 1208
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.

pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
pm = messaging.PubMaster(
['roadCameraState', 'wideRoadCameraState', 'driverCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl', 'controlsState'])


def parse_args(add_args=None):
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--high_quality', action='store_true')
parser.add_argument('--dual_camera', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point', type=int, default=16)

# Enables real driver monitoring in the simulator
parser.add_argument('--dm', action='store_true')
# Enables camera-only mode (webcam preview with fake driver monitoring)
parser.add_argument('--camera_gui', action='store_true')
return parser.parse_args(add_args)


@@ -69,16 +78,22 @@ class Camerad:
def __init__(self):
self.frame_road_id = 0
self.frame_wide_id = 0
self.frame_driver_id = 0
self.vipc_server = VisionIpcServer("camerad")

self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5, False, W, H)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 5, False, W, H)
self.vipc_server.start_listener()

self.vipc_webcam_gui_server = VisionIpcServer("webcamguid")
self.vipc_webcam_gui_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 1, True, W, H)
self.vipc_webcam_gui_server.start_listener()

# set up for pyopencl rgb to yuv conversion
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "
cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG"

kernel_fn = os.path.join(BASEDIR, "tools/sim/rgb_to_nv12.cl")
with open(kernel_fn) as f:
@@ -95,17 +110,21 @@ def cam_callback_wide_road(self, image):
self._cam_callback(image, self.frame_wide_id, 'wideRoadCameraState', VisionStreamType.VISION_STREAM_WIDE_ROAD)
self.frame_wide_id += 1

def cam_callback_driver(self, image):
self._cam_callback(image, self.frame_driver_id, 'driverCameraState', VisionStreamType.VISION_STREAM_DRIVER)
self.frame_driver_id += 1

def _cam_callback(self, image, frame_id, pub_type, yuv_type):
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))

if pub_type == "driverCameraState":
img = np.reshape(image, (H, W, 4))
else:
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()

# convert RGB frame to YUV
rgb = np.reshape(img, (H, W * 3))
rgb_cl = cl_array.to_device(self.queue, rgb)
yuv_cl = cl_array.empty_like(rgb_cl)
self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)), None, rgb_cl.data, yuv_cl.data).wait()
yuv = np.resize(yuv_cl.get(), rgb.size // 2)
yuv, rgb = self.convert_rgb_to_yuv(img)
eof = int(frame_id * 0.05 * 1e9)

self.vipc_server.send(yuv_type, yuv.data.tobytes(), frame_id, eof, eof)
@@ -120,6 +139,15 @@ def _cam_callback(self, image, frame_id, pub_type, yuv_type):
setattr(dat, pub_type, msg)
pm.send(pub_type, dat)

def convert_rgb_to_yuv(self, img):
rgb = np.reshape(img, (H, W * 3))
rgb_cl = cl_array.to_device(self.queue, rgb)
yuv_cl = cl_array.empty_like(rgb_cl)
self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)), None, rgb_cl.data, yuv_cl.data).wait()
yuv = np.resize(yuv_cl.get(), rgb.size // 2)
return yuv, rgb


def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
@@ -197,6 +225,7 @@ def gps_callback(gps, vehicle_state):


def fake_driver_monitoring(exit_event: threading.Event):
print("Driver Monitoring disabled")
pm = messaging.PubMaster(['driverStateV2', 'driverMonitoringState'])
while not exit_event.is_set():
# dmonitoringmodeld output
Expand All @@ -216,6 +245,48 @@ def fake_driver_monitoring(exit_event: threading.Event):
time.sleep(DT_DMON)


def webcam_gui_function():
app = QtWidgets.QApplication([])

widget = CameraWidget()
widget.show()

app.exec_()


def webcam_function(self, camerad: Camerad, exit_event: threading.Event):
# Ratekeeper caps how often frames are published; here the rate is limited to 10 frames per second
rk = Ratekeeper(10)
# Open the webcam (camera ID 0, i.e. /dev/video0; change the index for a different device)
cap = cv2.VideoCapture(0)
if self._args.dm:
print("Driver Monitoring enabled")
if self._args.camera_gui:
print("Webcam only mode enabled")
while not exit_event.is_set():
ret, frame = cap.read()
if not ret:
break

# Resize the frame and convert it from BGR to BGRA
frame = cv2.resize(frame, (W, H))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)

if self._args.dm:
camerad.cam_callback_driver(frame)

img = np.reshape(frame, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
yuv, rgb = camerad.convert_rgb_to_yuv(img)
eof = int(camerad.frame_driver_id * 0.05 * 1e9)
camerad.vipc_webcam_gui_server.send(VisionStreamType.VISION_STREAM_DRIVER, rgb.data.tobytes(),
camerad.frame_driver_id, eof, eof)

rk.keep_time()


def can_function_runner(vs: VehicleState, exit_event: threading.Event):
i = 0
while not exit_event.is_set():
@@ -234,7 +305,6 @@ class CarlaBridge:

def __init__(self, arguments):
set_params_enabled()

msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = 20
msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
@@ -305,7 +375,8 @@ def _run(self, q: Queue):
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
vehicle_bp.set_attribute('role_name', 'hero')
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > self._args.num_selected_spawn_point, f'''No spawn point {self._args.num_selected_spawn_point}, try a value between 0 and
assert len(
spawn_points) > self._args.num_selected_spawn_point, f'''No spawn point {self._args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[self._args.num_selected_spawn_point]
vehicle = world.spawn_actor(vehicle_bp, spawn_point)
@@ -340,7 +411,8 @@ def create_camera(fov, callback):
road_camera = create_camera(fov=40, callback=self._camerad.cam_callback_road)
self._carla_objects.append(road_camera)

road_wide_camera = create_camera(fov=120, callback=self._camerad.cam_callback_wide_road) # fov bigger than 120 shows unwanted artifacts
road_wide_camera = create_camera(fov=120,
callback=self._camerad.cam_callback_wide_road) # fov bigger than 120 shows unwanted artifacts
self._carla_objects.append(road_wide_camera)

vehicle_state = VehicleState()
@@ -355,10 +427,23 @@ def create_camera(fov, callback):
gps.listen(lambda gps: gps_callback(gps, vehicle_state))

self._carla_objects.extend([imu, gps])

def start_dm_threads():
if self._args.dm:
# 1: Enables real driver monitoring in the simulator
self._threads.append(threading.Thread(target=webcam_function, args=(self, self._camerad, self._exit_event)))
elif self._args.camera_gui:
# 2: Enables camera-only mode with fake driver monitoring
self._threads.append(threading.Thread(target=webcam_function, args=(self, self._camerad, self._exit_event)))
self._threads.append(threading.Thread(target=fake_driver_monitoring, args=(self._exit_event,)))
else:
# Enables fake driver monitoring in the simulator
self._threads.append(threading.Thread(target=fake_driver_monitoring, args=(self._exit_event,)))

# launch fake car threads
self._threads.append(threading.Thread(target=panda_state_function, args=(vehicle_state, self._exit_event,)))
self._threads.append(threading.Thread(target=peripheral_state_function, args=(self._exit_event,)))
self._threads.append(threading.Thread(target=fake_driver_monitoring, args=(self._exit_event,)))
start_dm_threads()
self._threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, self._exit_event,)))
for t in self._threads:
t.start()
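
Taken together with start_dm_threads() above, the bridge now supports three launch modes. A rough usage sketch (paths relative to the openpilot checkout; webcam index 0 assumed):

# Sketch of the three driver-monitoring modes wired up in start_dm_threads()
./tools/sim/bridge.py --dm           # real driver monitoring fed from the webcam
./tools/sim/bridge.py --camera_gui   # webcam preview mode with fake driver monitoring
./tools/sim/bridge.py                # default: fake driver monitoring only
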
Empty file added tools/sim/cameragui/__init__.py
105 changes: 105 additions & 0 deletions tools/sim/cameragui/camera_gui.py
@@ -0,0 +1,105 @@
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication, QSizePolicy
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
import numpy as np
import cereal.messaging as messaging
from cereal.visionipc import VisionIpcClient, VisionStreamType

H = 1208
W = 1928


class Thread(QThread):
changePixmap = pyqtSignal(QImage)
client = None
sm = None

def run(self):
self.init_connections()
self.client.connect(True)

while True:
frame = self.get_frame()
faceDetected, isDistracted = self.get_drivermonitoring_state()
self.draw_text(faceDetected, isDistracted, frame)
pixMap = self.convert_frame_to_pixmap(frame)
self.changePixmap.emit(pixMap)

def init_connections(self):
self.client = VisionIpcClient("webcamguid", VisionStreamType.VISION_STREAM_DRIVER, False)
self.sm = messaging.SubMaster(['driverMonitoringState'])

def get_frame(self):
frame = self.client.recv()
frame = np.array(frame)

if frame.any():
frame = np.reshape(frame, (H, W, 3))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return frame

def get_drivermonitoring_state(self):
self.sm.update()
data = self.sm['driverMonitoringState']
faceDetected = data.faceDetected
isDistracted = data.isDistracted
return faceDetected, isDistracted

def draw_text(self, faceDetected, isDistracted, frame):
if faceDetected:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.putText(frame, 'faceDetected: ' + str(faceDetected), (50, 150), cv2.FONT_HERSHEY_PLAIN, 5, color, 5)

if not isDistracted and not faceDetected:
color = (255, 255, 0)
elif not isDistracted:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.putText(frame, 'isDistracted: ' + str(isDistracted), (60, 225), cv2.FONT_HERSHEY_PLAIN, 5, color, 5)

def convert_frame_to_pixmap(self, frame):
bytesPerLine = W * 3

convertToQtFormat = QImage(frame.data, W, H, bytesPerLine, QImage.Format_RGB888)

pixMap = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
return pixMap


class CameraWidget(QWidget):
def __init__(self):
super().__init__()
self.label = QLabel(self)
self.label.resize(640, 480)
self.label.setScaledContents(True)
self.label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.pixmap = None
th = Thread(self)
th.changePixmap.connect(self.set_image)
th.start()

@pyqtSlot(QImage)
def set_image(self, image):
self.pixmap = QPixmap.fromImage(image)
self.label.setPixmap(self.pixmap)

def resizeEvent(self, event):
if self.pixmap is not None:
pixmap1 = self.pixmap
self.pixmap = pixmap1.scaled(self.width(), self.height())
self.label.setPixmap(self.pixmap)
self.label.resize(self.width(), self.height())


if __name__ == "__main__":
app = QApplication(sys.argv)

widget = CameraWidget()

widget.show()

app.exit(app.exec_())
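
The widget can also be run on its own through the __main__ block above. A minimal sketch, assuming the bridge is already publishing driver frames to the "webcamguid" VisionIPC server:

# Sketch: start the preview window standalone while bridge.py is publishing frames
python tools/sim/cameragui/camera_gui.py
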
16 changes: 15 additions & 1 deletion tools/sim/launch_openpilot.sh
@@ -4,12 +4,26 @@ export PASSIVE="0"
export NOBOARD="1"
export SIMULATION="1"
export FINGERPRINT="HONDA CIVIC 2016"

export BLOCK="camerad,encoderd"

while getopts 'i' OPTION;
do
case "$OPTION" in
i)
read -p "Do you want to enable Driver Monitoring in the simulator? (y/n)";
if [ "${REPLY}" == "y" ]; then
export DM_SIM="1"
fi
;;
esac
done

if [[ "$CI" ]]; then
# TODO: offscreen UI should work
export BLOCK="${BLOCK},ui"
fi


DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
cd ../../selfdrive/manager && exec ./manager.py
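
For reference, a possible end-to-end session with driver monitoring enabled. This is a sketch only; it assumes CARLA is already running and a webcam is available at /dev/video0:

# Terminal 1: start openpilot, answering "y" at the prompt so DM_SIM=1 is exported
cd tools/sim && ./launch_openpilot.sh -i

# Terminal 2: start the CARLA bridge with real driver monitoring
cd tools/sim && ./bridge.py --dm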