diff --git a/README.md b/README.md
index aba6b3d597e7f8..6efb3fae684ee6 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,7 @@
WARNING: TESLA ONLY OPENPILOT 0.7.4-T15
======
This repo contains code that was modified specifically for Tesla and will not work on other cars!
+Main Comma.ai code is Copyright (c) 2018, Comma.ai, Inc. Additional work (ALCA, webcam support, and any modifications to the base Comma.ai code) is licensed under the terms below.
This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
[](#)
diff --git a/SConstruct b/SConstruct
index 5944541b5b8155..14c4336eb6d9a6 100644
--- a/SConstruct
+++ b/SConstruct
@@ -12,19 +12,29 @@ AddOption('--asan',
help='turn on ASAN')
arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
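+# the presence of /data/tinkla_buddy_pro marks a Tinkla Buddy Pro (TBP) device and selects the RK3399 build settings below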
+is_tbp = os.path.isfile('/data/tinkla_buddy_pro')
if platform.system() == "Darwin":
arch = "Darwin"
if arch == "aarch64":
- lenv = {
- "LD_LIBRARY_PATH": '/data/data/com.termux/files/usr/lib',
- "PATH": os.environ['PATH'],
- "ANDROID_DATA": os.environ['ANDROID_DATA'],
- "ANDROID_ROOT": os.environ['ANDROID_ROOT'],
- }
+ if is_tbp:
+ lenv = {
+ "LD_LIBRARY_PATH": '/usr/lib',
+ "PATH": os.environ['PATH'],
+ "ANDROID_DATA": "/data",
+ "ANDROID_ROOT": "/",
+ }
+ else:
+ lenv = {
+ "LD_LIBRARY_PATH": '/data/data/com.termux/files/usr/lib',
+ "PATH": os.environ['PATH'],
+ "ANDROID_DATA": os.environ['ANDROID_DATA'],
+ "ANDROID_ROOT": os.environ['ANDROID_ROOT'],
+ }
cpppath = [
"#phonelibs/opencl/include",
+ "#phonelibs/snpe/include",
]
libpath = [
"#phonelibs/snpe/aarch64-android-clang3.8",
@@ -35,11 +45,36 @@ if arch == "aarch64":
"#phonelibs/nanovg",
"#phonelibs/libyuv/lib",
]
-
- cflags = ["-DQCOM", "-mcpu=cortex-a57"]
- cxxflags = ["-DQCOM", "-mcpu=cortex-a57"]
-
- rpath = ["/system/vendor/lib64"]
+
+ if is_tbp:
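+    # TBP (RK3399): no -DQCOM; SNPE and support libraries come from /data/op_rk3399_setup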
+ cflags = ["-mcpu=cortex-a57"]
+ cxxflags = ["-mcpu=cortex-a57"]
+ cpppath = [
+ "#phonelibs/opencl/include",
+ "/data/op_rk3399_setup/support_files/include",
+ "/data/op_rk3399_setup/external/snpe/include",
+ ]
+ libpath = [
+ "/data/op_rk3399_setup/external/snpe/lib/lib",
+ "/usr/lib",
+ "/data/data/com.termux/files/usr/lib",
+ "/system/vendor/lib64",
+ "/system/comma/usr/lib",
+ "#phonelibs/nanovg",
+ "/data/op_rk3399_setup/support_files/lib",
+ ]
+ rpath = ["/system/vendor/lib64",
+ "/data/op_rk3399_setup/external/snpe/lib/lib",
+ "/data/op_rk3399_setup/support_files/lib",
+ "external/tensorflow/lib",
+ "cereal",
+ "/usr/lib",
+ "selfdrive/common",
+ ]
+ else:
+ cflags = ["-DQCOM", "-mcpu=cortex-a57"]
+ cxxflags = ["-DQCOM", "-mcpu=cortex-a57"]
+ rpath = ["/system/vendor/lib64"]
else:
lenv = {
"PATH": "#external/bin:" + os.environ['PATH'],
@@ -123,7 +158,6 @@ env = Environment(
"#phonelibs/android_hardware_libhardware/include",
"#phonelibs/android_system_core/include",
"#phonelibs/linux/include",
- "#phonelibs/snpe/include",
"#phonelibs/nanovg",
"#selfdrive/common",
"#selfdrive/camerad",
@@ -178,7 +212,7 @@ def abspath(x):
#zmq = 'zmq'
# still needed for apks
zmq = FindFile("libzmq.a", libpath)
-Export('env', 'arch', 'zmq', 'SHARED')
+Export('env', 'arch', 'zmq', 'SHARED', 'is_tbp')
# cereal and messaging are shared with the system
SConscript(['cereal/SConscript'])
@@ -222,7 +256,7 @@ SConscript(['selfdrive/proclogd/SConscript'])
SConscript(['selfdrive/ui/SConscript'])
SConscript(['selfdrive/loggerd/SConscript'])
-if arch == "aarch64":
+if arch == "aarch64" and not is_tbp:
SConscript(['selfdrive/logcatd/SConscript'])
SConscript(['selfdrive/sensord/SConscript'])
SConscript(['selfdrive/clocksd/SConscript'])
diff --git a/cereal/SConscript b/cereal/SConscript
index 9fd24d0d327d90..a7fa9560e9109d 100644
--- a/cereal/SConscript
+++ b/cereal/SConscript
@@ -1,5 +1,4 @@
-Import('env', 'arch', 'zmq')
-
+Import('env', 'arch', 'zmq', 'is_tbp')
gen_dir = Dir('gen')
messaging_dir = Dir('messaging')
@@ -55,8 +54,11 @@ Depends('messaging/impl_zmq.cc', services_h)
# note, this rebuilds the deps shared, zmq is statically linked to make APK happy
# TODO: get APK to load system zmq to remove the static link
-shared_lib_shared_lib = [zmq, 'm', 'stdc++'] + ["gnustl_shared"] if arch == "aarch64" else [zmq]
-env.SharedLibrary('messaging_shared', messaging_objects, LIBS=shared_lib_shared_lib)
+if is_tbp:
+ shared_lib_shared_lib = [zmq, 'm', 'stdc++']
+else:
+ shared_lib_shared_lib = [zmq, 'm', 'stdc++'] + ["gnustl_shared"] if arch == "aarch64" else [zmq]
+env.SharedLibrary('messaging_shared', messaging_objects, LIBS=shared_lib_shared_lib)
env.Program('messaging/bridge', ['messaging/bridge.cc'], LIBS=[messaging_lib, 'zmq'])
Depends('messaging/bridge.cc', services_h)
diff --git a/cereal/SConstruct b/cereal/SConstruct
index a72286b279bd74..bddb9bc0abac28 100644
--- a/cereal/SConstruct
+++ b/cereal/SConstruct
@@ -3,7 +3,7 @@ import subprocess
zmq = 'zmq'
arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
-
+is_tbp = os.path.isfile('/data/tinkla_buddy_pro')
cereal_dir = Dir('.')
cpppath = [
@@ -45,5 +45,5 @@ env = Environment(
)
-Export('env', 'zmq', 'arch')
+Export('env', 'zmq', 'arch', 'is_tbp')
SConscript(['SConscript'])
diff --git a/cereal/messaging/messaging_pyx_setup.py b/cereal/messaging/messaging_pyx_setup.py
index a763d89f862586..547c138ffa3dd7 100644
--- a/cereal/messaging/messaging_pyx_setup.py
+++ b/cereal/messaging/messaging_pyx_setup.py
@@ -33,10 +33,11 @@ def get_ext_filename(self, ext_name):
extra_compile_args = ["-std=c++11"]
libraries = ['zmq']
ARCH = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip() # pylint: disable=unexpected-keyword-arg
-
+is_tbp = os.path.isfile('/data/tinkla_buddy_pro')
if ARCH == "aarch64":
extra_compile_args += ["-Wno-deprecated-register"]
- libraries += ['gnustl_shared']
+ if not is_tbp:
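+    # gnustl_shared is an Android (EON) toolchain library and is not present on TBP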
+ libraries += ['gnustl_shared']
setup(name='CAN parser',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
diff --git a/phonelibs/json11/json11.o b/phonelibs/json11/json11.o
deleted file mode 100644
index 7f9fe2e9b1ab00..00000000000000
Binary files a/phonelibs/json11/json11.o and /dev/null differ
diff --git a/selfdrive/camerad/SConscript b/selfdrive/camerad/SConscript
index 12e1afe5acd492..5cfe57103890fb 100644
--- a/selfdrive/camerad/SConscript
+++ b/selfdrive/camerad/SConscript
@@ -1,10 +1,14 @@
-Import('env', 'arch', 'messaging', 'common', 'gpucommon', 'visionipc', 'cereal')
+Import('env', 'arch', 'is_tbp', 'messaging', 'common', 'gpucommon', 'visionipc', 'cereal')
libs = ['m', 'pthread', common, 'jpeg', 'json', cereal, 'OpenCL', messaging, 'czmq', 'zmq', 'capnp', 'kj', 'capnp_c', visionipc, gpucommon]
if arch == "aarch64":
- libs += ['gsl', 'CB', 'adreno_utils', 'EGL', 'GLESv3', 'cutils', 'ui']
- cameras = ['cameras/camera_qcom.c']
+ if is_tbp:
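+    # TBP has no QCOM camera stack; use the generic frame-stream camera instead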
+ libs += []
+ cameras = ['cameras/camera_frame_stream.cc']
+ else:
+ libs += ['gsl', 'CB', 'adreno_utils', 'EGL', 'GLESv3', 'cutils', 'ui']
+ cameras = ['cameras/camera_qcom.c']
else:
libs += []
cameras = ['cameras/camera_frame_stream.cc']
diff --git a/selfdrive/common/SConscript b/selfdrive/common/SConscript
index 6f40e6a8f0a20e..9f2b2706af42c3 100644
--- a/selfdrive/common/SConscript
+++ b/selfdrive/common/SConscript
@@ -1,4 +1,4 @@
-Import('env', 'arch', 'SHARED')
+Import('env', 'arch', 'SHARED', 'is_tbp')
if SHARED:
fxn = env.SharedLibrary
@@ -17,13 +17,20 @@ files = [
]
if arch == "aarch64":
- defines = {}
- files += [
- 'framebuffer.cc',
- 'touch.c',
- 'visionbuf_ion.c',
- ]
- _gpu_libs = ['gui', 'adreno_utils']
+ if is_tbp:
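+    # TBP: skip the Android framebuffer/touch/ion sources; use the OpenCL visionbuf and plain GL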
+ defines = {"CLU_NO_CACHE": None}
+ files += [
+ 'visionbuf_cl.c',
+ ]
+ _gpu_libs = ["GL"]
+ else:
+ defines = {}
+ files += [
+ 'framebuffer.cc',
+ 'touch.c',
+ 'visionbuf_ion.c',
+ ]
+ _gpu_libs = ['gui', 'adreno_utils']
else:
defines = {"CLU_NO_CACHE": None}
files += [
diff --git a/selfdrive/loggerd/SConscript b/selfdrive/loggerd/SConscript
index 1feb899c2aa124..cc63c1b017ef1a 100644
--- a/selfdrive/loggerd/SConscript
+++ b/selfdrive/loggerd/SConscript
@@ -1,4 +1,4 @@
-Import('env', 'arch', 'messaging', 'common', 'visionipc')
+Import('env', 'arch', 'is_tbp', 'messaging', 'common', 'visionipc')
src = ['loggerd.cc', 'logger.cc']
libs = ['zmq', 'czmq', 'capnp', 'kj', 'z',
@@ -6,7 +6,8 @@ libs = ['zmq', 'czmq', 'capnp', 'kj', 'z',
'yuv', 'bz2', common, 'json', messaging, visionipc]
if arch == "aarch64":
- src += ['encoder.c', 'raw_logger.cc']
- libs += ['OmxVenc', 'OmxCore', 'cutils']
+ if not is_tbp:
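+    # the OMX hardware video encoder is only available on the QCOM (EON) build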
+ src += ['encoder.c', 'raw_logger.cc']
+ libs += ['OmxVenc', 'OmxCore', 'cutils']
env.Program(src, LIBS=libs)
diff --git a/selfdrive/modeld/SConscript b/selfdrive/modeld/SConscript
index 4c58e278f032db..b60e9f579deb28 100644
--- a/selfdrive/modeld/SConscript
+++ b/selfdrive/modeld/SConscript
@@ -1,4 +1,4 @@
-Import('env', 'arch', 'messaging', 'common', 'gpucommon', 'visionipc')
+Import('env', 'arch', 'is_tbp', 'messaging', 'common', 'gpucommon', 'visionipc')
lenv = env.Clone()
libs = [messaging, common, 'OpenCL', 'SNPE', 'capnp', 'zmq', 'kj', 'yuv', gpucommon, visionipc]
@@ -8,9 +8,12 @@ common_src = [
"runners/snpemodel.cc",
"transforms/loadyuv.c",
"transforms/transform.c"]
-
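+# on TBP, treat the arch as its own target so only pthread is linked (the QCOM gsl/CB/gnustl_shared libraries are unavailable)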
+if is_tbp:
+ arch = "aarch64_TBP"
if arch == "aarch64":
libs += ['gsl', 'CB', 'gnustl_shared']
+elif arch == "aarch64_TBP":
+ libs += ['pthread']
else:
libs += ['symphony-cpu', 'pthread']
diff --git a/selfdrive/modeld/runners/tfmodel.cc b/selfdrive/modeld/runners/tfmodel.cc
new file mode 100644
index 00000000000000..502ac4ba63a6e7
--- /dev/null
+++ b/selfdrive/modeld/runners/tfmodel.cc
@@ -0,0 +1,98 @@
+#include "tfmodel.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "common/util.h"
+#include "common/utilpp.h"
+#include "common/swaglog.h"
+#include
+
+
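+// TFModel runs the network in a separate Python process (runners/keras_runner.py):
+// the ".dlc" model path is rewritten to ".keras", the runner is fork/exec'd, and
+// input/output tensors are exchanged as raw floats over a pair of pipes.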
+TFModel::TFModel(const char *path, float *_output, size_t _output_size, int runtime) {
+ output = _output;
+ output_size = _output_size;
+
+ char tmp[1024];
+ strncpy(tmp, path, sizeof(tmp));
+ strstr(tmp, ".dlc")[0] = '\0';
+ strcat(tmp, ".keras");
+ LOGD("loading model %s", tmp);
+
+ assert(pipe(pipein) == 0);
+ assert(pipe(pipeout) == 0);
+
+ std::string exe_dir = util::dir_name(util::readlink("/proc/self/exe"));
+ std::string keras_runner = exe_dir + "/runners/keras_runner.py";
+
+ proc_pid = fork();
+ if (proc_pid == 0) {
+ LOGD("spawning keras process %s", keras_runner.c_str());
+ char *argv[] = {(char*)keras_runner.c_str(), tmp, NULL};
+ dup2(pipein[0], 0);
+ dup2(pipeout[1], 1);
+ close(pipein[0]);
+ close(pipein[1]);
+ close(pipeout[0]);
+ close(pipeout[1]);
+ execvp(keras_runner.c_str(), argv);
+ }
+
+ // parent
+ close(pipein[0]);
+ close(pipeout[1]);
+}
+
+TFModel::~TFModel() {
+ close(pipein[1]);
+ close(pipeout[0]);
+ kill(proc_pid, SIGTERM);
+}
+
+void TFModel::pwrite(float *buf, int size) {
+ char *cbuf = (char *)buf;
+ int tw = size*sizeof(float);
+ while (tw > 0) {
+ int err = write(pipein[1], cbuf, tw);
+ //printf("host write %d\n", err);
+ assert(err >= 0);
+ cbuf += err;
+ tw -= err;
+ }
+ //printf("host write done\n");
+}
+
+void TFModel::pread(float *buf, int size) {
+ char *cbuf = (char *)buf;
+ int tr = size*sizeof(float);
+ while (tr > 0) {
+ int err = read(pipeout[0], cbuf, tr);
+ //printf("host read %d/%d\n", err, tr);
+ assert(err >= 0);
+ cbuf += err;
+ tr -= err;
+ }
+ //printf("host read done\n");
+}
+
+void TFModel::addRecurrent(float *state, int state_size) {
+ rnn_input_buf = state;
+ rnn_state_size = state_size;
+}
+
+void TFModel::addDesire(float *state, int state_size) {
+ desire_input_buf = state;
+ desire_state_size = state_size;
+}
+
+void TFModel::execute(float *net_input_buf, int buf_size) {
+ // order must be this
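+  // the keras runner reads these buffers in this same fixed order before writing the output back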
+ pwrite(net_input_buf, buf_size);
+ pwrite(desire_input_buf, desire_state_size);
+ pwrite(rnn_input_buf, rnn_state_size);
+ pread(output, output_size);
+}
+
diff --git a/selfdrive/modeld/runners/tfmodel.h b/selfdrive/modeld/runners/tfmodel.h
new file mode 100644
index 00000000000000..66e50b4a1dbcb7
--- /dev/null
+++ b/selfdrive/modeld/runners/tfmodel.h
@@ -0,0 +1,35 @@
+#ifndef TFMODEL_H
+#define TFMODEL_H
+
+#include <stdlib.h>
+#include "runmodel.h"
+
+struct TFState;
+
+class TFModel : public RunModel {
+public:
+ TFModel(const char *path, float *output, size_t output_size, int runtime);
+ ~TFModel();
+ void addRecurrent(float *state, int state_size);
+ void addDesire(float *state, int state_size);
+ void execute(float *net_input_buf, int buf_size);
+private:
+ int proc_pid;
+
+ float *output;
+ size_t output_size;
+
+ float *rnn_input_buf = NULL;
+ int rnn_state_size;
+ float *desire_input_buf = NULL;
+ int desire_state_size;
+
+ // pipe to communicate to keras subprocess
+ void pread(float *buf, int size);
+ void pwrite(float *buf, int size);
+ int pipein[2];
+ int pipeout[2];
+};
+
+#endif
+
diff --git a/selfdrive/registration.py b/selfdrive/registration.py
index 5b01d1446f5a4b..60f372e25cb46a 100644
--- a/selfdrive/registration.py
+++ b/selfdrive/registration.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3.7
import os
import json
diff --git a/selfdrive/tbp_manager.py b/selfdrive/tbp_manager.py
new file mode 100755
index 00000000000000..687ee5669428f7
--- /dev/null
+++ b/selfdrive/tbp_manager.py
@@ -0,0 +1,613 @@
+#!/usr/bin/env python3.7
+import os
+import time
+import sys
+import fcntl
+import errno
+import signal
+import shutil
+import subprocess
+import time
+from selfdrive.tinklad.tinkla_interface import TinklaClient
+from cereal import tinkla
+from selfdrive.car.tesla.readconfig import CarSettings
+import datetime
+
+from common.basedir import BASEDIR, PARAMS
+from common.android import ANDROID
+sys.path.append(os.path.join(BASEDIR, "pyextra"))
+os.environ['BASEDIR'] = BASEDIR
+
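+# TOTAL_SCONS_NODES scales the scons build progress spinner over the 0-50% range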
+TOTAL_SCONS_NODES = 1195
+prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
+
+# Create folders needed for msgq
+try:
+ os.mkdir("/dev/shm")
+except FileExistsError:
+ pass
+except PermissionError:
+ print("WARNING: failed to make /dev/shm")
+
+if ANDROID:
+ os.chmod("/dev/shm", 0o777)
+
+def unblock_stdout():
+ # get a non-blocking stdout
+ child_pid, child_pty = os.forkpty()
+ if child_pid != 0: # parent
+
+ # child is in its own process group, manually pass kill signals
+ signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
+ signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
+
+ fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
+ fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ while True:
+ try:
+ dat = os.read(child_pty, 4096)
+ except OSError as e:
+ if e.errno == errno.EIO:
+ break
+ continue
+
+ if not dat:
+ break
+
+ try:
+ sys.stdout.write(dat.decode('utf8'))
+ except (OSError, IOError, UnicodeDecodeError):
+ pass
+
+ # os.wait() returns a tuple with the pid and a 16 bit value
+  # whose low byte is the signal number and whose high byte is the exit status
+ exit_status = os.wait()[1] >> 8
+ os._exit(exit_status)
+
+
+if __name__ == "__main__":
+ unblock_stdout()
+ from common.spinner import Spinner
+else:
+ from common.spinner import FakeSpinner as Spinner
+
+import importlib
+import traceback
+from multiprocessing import Process
+
+# Run scons
+spinner = Spinner()
+spinner.update("0")
+
+if not prebuilt:
+ for retry in [True, False]:
+ # run scons
+ env = os.environ.copy()
+ env['SCONS_PROGRESS'] = "1"
+ env['SCONS_CACHE'] = "1"
+
+ nproc = os.cpu_count()
+ j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
+ scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
+
+ # Read progress from stderr and update spinner
+ while scons.poll() is None:
+ try:
+ line = scons.stderr.readline()
+ if line is None:
+ continue
+
+ line = line.rstrip()
+ prefix = b'progress: '
+ if line.startswith(prefix):
+ i = int(line[len(prefix):])
+ if spinner is not None:
+ spinner.update("%d" % (50.0 * (i / TOTAL_SCONS_NODES)))
+ elif len(line):
+ print(line.decode('utf8'))
+ except Exception:
+ pass
+
+ if scons.returncode != 0:
+ if retry:
+ print("scons build failed, cleaning in")
+ for i in range(3,-1,-1):
+ print("....%d" % i)
+ time.sleep(1)
+ subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
+ shutil.rmtree("/tmp/scons_cache")
+ else:
+ raise RuntimeError("scons build failed")
+ else:
+ break
+
+import cereal
+import cereal.messaging as messaging
+
+from common.params import Params
+import selfdrive.crash as crash
+from selfdrive.swaglog import cloudlog
+from selfdrive.registration import register
+from selfdrive.version import version, dirty
+from selfdrive.loggerd.config import ROOT
+from selfdrive.launcher import launcher
+from common import android
+from common.apk import update_apks, pm_apply_packages, start_offroad
+from common.manager_helpers import print_cpu_usage
+
+ThermalStatus = cereal.log.ThermalData.ThermalStatus
+
+# comment out anything you don't want to run
+managed_processes = {
+ "tinklad": "selfdrive.tinklad.tinklad",
+ "thermald": "selfdrive.thermald.thermald",
+ "uploader": "selfdrive.loggerd.uploader",
+ "deleter": "selfdrive.loggerd.deleter",
+ "controlsd": "selfdrive.controls.controlsd",
+ "plannerd": "selfdrive.controls.plannerd",
+ "radard": "selfdrive.controls.radard",
+ "dmonitoringd": "selfdrive.controls.dmonitoringd",
+ "ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
+ "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
+ "logmessaged": "selfdrive.logmessaged",
+ "locationd": "selfdrive.locationd.locationd",
+ "tombstoned": "selfdrive.tombstoned",
+ "logcatd": ("selfdrive/logcatd", ["./logcatd"]),
+ "proclogd": ("selfdrive/proclogd", ["./proclogd"]),
+ "boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
+ "pandad": "selfdrive.pandad",
+ "ui": ("selfdrive/ui", ["./ui"]),
+ "calibrationd": "selfdrive.locationd.calibrationd",
+ "paramsd": ("selfdrive/locationd", ["./paramsd"]),
+ "camerad": ("selfdrive/camerad", ["./camerad"]),
+ "sensord": ("selfdrive/sensord", ["./sensord"]),
+ "clocksd": ("selfdrive/clocksd", ["./clocksd"]),
+ "gpsd": ("selfdrive/sensord", ["./gpsd"]),
+ "updated": "selfdrive.updated",
+ "dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
+ "modeld": ("selfdrive/modeld", ["./modeld"]),
+}
+
+daemon_processes = {
+ "manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
+}
+
+running = {}
+def get_running():
+ return running
+
+# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
+unkillable_processes = ['camerad']
+
+# processes to end with SIGINT instead of SIGTERM
+interrupt_processes = []
+
+# processes to end with SIGKILL instead of SIGTERM
+kill_processes = ['sensord', 'paramsd']
+
+# processes to end if thermal conditions exceed Green parameters
+green_temp_processes = ['uploader']
+
+persistent_processes = [
+ 'tinklad',
+ 'thermald',
+ 'logmessaged',
+ 'ui',
+ 'uploader',
+]
+if ANDROID:
+ persistent_processes += [
+ 'logcatd',
+ 'tombstoned',
+ 'updated',
+ ]
+
+car_started_processes = [
+ 'controlsd',
+ 'plannerd',
+ 'loggerd',
+ 'radard',
+ 'dmonitoringd',
+ 'calibrationd',
+ 'paramsd',
+ 'camerad',
+ 'modeld',
+ 'proclogd',
+ 'ubloxd',
+ 'locationd',
+]
+if ANDROID:
+ car_started_processes += [
+ 'sensord',
+ 'clocksd',
+ 'gpsd',
+ 'dmonitoringmodeld',
+ 'deleter',
+ ]
+
+def register_managed_process(name, desc, car_started=False):
+ global managed_processes, car_started_processes, persistent_processes
+ print("registering %s" % name)
+ managed_processes[name] = desc
+ if car_started:
+ car_started_processes.append(name)
+ else:
+ persistent_processes.append(name)
+
+# ****************** process management functions ******************
+def nativelauncher(pargs, cwd):
+ # exec the process
+ os.chdir(cwd)
+
+ # because when extracted from pex zips permissions get lost -_-
+ os.chmod(pargs[0], 0o700)
+
+ os.execvp(pargs[0], pargs)
+
+def start_managed_process(name):
+ if name in running or name not in managed_processes:
+ return
+ proc = managed_processes[name]
+ if isinstance(proc, str):
+ cloudlog.info("starting python %s" % proc)
+ running[name] = Process(name=name, target=launcher, args=(proc,))
+ else:
+ pdir, pargs = proc
+ cwd = os.path.join(BASEDIR, pdir)
+ cloudlog.info("starting process %s" % name)
+ running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
+ running[name].start()
+
+def start_daemon_process(name):
+ params = Params()
+ proc, pid_param = daemon_processes[name]
+ pid = params.get(pid_param, encoding='utf-8')
+
+ if pid is not None:
+ try:
+ os.kill(int(pid), 0)
+ with open(f'/proc/{pid}/cmdline') as f:
+ if proc in f.read():
+ # daemon is running
+ return
+ except (OSError, FileNotFoundError):
+ # process is dead
+ pass
+
+ cloudlog.info("starting daemon %s" % name)
+ proc = subprocess.Popen(['python', '-m', proc],
+ stdin=open('/dev/null', 'r'),
+ stdout=open('/dev/null', 'w'),
+ stderr=open('/dev/null', 'w'),
+ preexec_fn=os.setpgrp)
+
+ params.put(pid_param, str(proc.pid))
+
+def prepare_managed_process(p):
+ proc = managed_processes[p]
+ if isinstance(proc, str):
+ # import this python
+ cloudlog.info("preimporting %s" % proc)
+ importlib.import_module(proc)
+ elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
+ # build this process
+ cloudlog.info("building %s" % (proc,))
+ try:
+ subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
+ except subprocess.CalledProcessError:
+ # make clean if the build failed
+ cloudlog.warning("building %s failed, make clean" % (proc, ))
+ subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
+ subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
+
+
+def join_process(process, timeout):
+ # Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
+ # We have to poll the exitcode instead
+ t = time.time()
+ while time.time() - t < timeout and process.exitcode is None:
+ time.sleep(0.001)
+
+
+def kill_managed_process(name):
+ if name not in running or name not in managed_processes:
+ return
+ cloudlog.info("killing %s" % name)
+
+ if running[name].exitcode is None:
+ if name in interrupt_processes:
+ os.kill(running[name].pid, signal.SIGINT)
+ elif name in kill_processes:
+ os.kill(running[name].pid, signal.SIGKILL)
+ else:
+ running[name].terminate()
+
+ join_process(running[name], 5)
+
+ if running[name].exitcode is None:
+ if name in unkillable_processes:
+ cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
+ join_process(running[name], 15)
+ if running[name].exitcode is None:
+ cloudlog.critical("FORCE REBOOTING PHONE!")
+ os.system("date >> /sdcard/unkillable_reboot")
+ os.system("reboot")
+ raise RuntimeError
+ else:
+ cloudlog.info("killing %s with SIGKILL" % name)
+ os.kill(running[name].pid, signal.SIGKILL)
+ running[name].join()
+
+ cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
+ del running[name]
+
+
+def cleanup_all_processes(signal, frame):
+ cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
+
+ if ANDROID:
+ pm_apply_packages('disable')
+
+ for name in list(running.keys()):
+ kill_managed_process(name)
+ cloudlog.info("everything is dead")
+
+# ****************** run loop ******************
+
+def manager_init(should_register=True):
+ if should_register:
+ reg_res = register()
+ if reg_res:
+ dongle_id, dongle_secret = reg_res
+ else:
+ raise Exception("server registration failed")
+ else:
+ dongle_id = "c"*16
+
+ # set dongle id
+ cloudlog.info("dongle id is " + dongle_id)
+ os.environ['DONGLE_ID'] = dongle_id
+
+ cloudlog.info("dirty is %d" % dirty)
+ if not dirty:
+ os.environ['CLEAN'] = '1'
+
+ cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
+ crash.bind_user(id=dongle_id)
+ crash.bind_extra(version=version, dirty=dirty, is_eon=True)
+
+ os.umask(0)
+ try:
+ os.mkdir(ROOT, 0o777)
+ except OSError:
+ pass
+
+ # ensure shared libraries are readable by apks
+ if ANDROID:
+ os.chmod(BASEDIR, 0o755)
+ os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
+ os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
+
+def system(cmd):
+ try:
+ cloudlog.info("running %s" % cmd)
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ cloudlog.event("running failed",
+ cmd=e.cmd,
+ output=e.output[-1024:],
+ returncode=e.returncode)
+
+def sendUserInfoToTinkla(params, tinklaClient):
+ carSettings = CarSettings()
+ gitRemote = params.get("GitRemote")
+ gitBranch = params.get("GitBranch")
+ gitHash = params.get("GitCommit")
+ dongleId = params.get("DongleId")
+ userHandle = carSettings.userHandle
+ info = tinkla.Interface.UserInfo.new_message(
+ openPilotId=dongleId,
+ userHandle=userHandle,
+ gitRemote=gitRemote,
+ gitBranch=gitBranch,
+ gitHash=gitHash
+ )
+ tinklaClient.setUserInfo(info)
+
+def manager_thread():
+ # now loop
+ thermal_sock = messaging.sub_sock('thermal')
+
+ if os.getenv("GET_CPU_USAGE"):
+ proc_sock = messaging.sub_sock('procLog', conflate=True)
+
+ cloudlog.info("manager start")
+ cloudlog.info({"environ": os.environ})
+
+ # save boot log
+ subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
+
+ params = Params()
+
+ # start daemon processes
+ for p in daemon_processes:
+ start_daemon_process(p)
+
+ # start persistent processes
+ for p in persistent_processes:
+ start_managed_process(p)
+
+ # start offroad
+ if ANDROID:
+ pm_apply_packages('enable')
+ start_offroad()
+
+ if os.getenv("NOBOARD") is None:
+ start_managed_process("pandad")
+
+ if os.getenv("BLOCK") is not None:
+ for k in os.getenv("BLOCK").split(","):
+ del managed_processes[k]
+
+ logger_dead = False
+
+ # Tinkla interface
+ last_tinklad_send_attempt_time = 0
+ tinklaClient = TinklaClient()
+ sendUserInfoToTinkla(params=params, tinklaClient=tinklaClient)
+ start_t = time.time()
+ first_proc = None
+
+ while 1:
+ msg = messaging.recv_sock(thermal_sock, wait=True)
+
+ # heavyweight batch processes are gated on favorable thermal conditions
+ if msg.thermal.thermalStatus >= ThermalStatus.yellow:
+ for p in green_temp_processes:
+ if p in persistent_processes:
+ kill_managed_process(p)
+ else:
+ for p in green_temp_processes:
+ if p in persistent_processes:
+ start_managed_process(p)
+
+ # Attempt to send pending messages if there's any that queued while offline
+ # Seems this loop runs every second or so, throttle to once every 30s
+ now = time.time()
+ if now - last_tinklad_send_attempt_time >= 30:
+ tinklaClient.attemptToSendPendingMessages()
+ last_tinklad_send_attempt_time = now
+
+ if msg.thermal.freeSpace < 0.05:
+ logger_dead = True
+
+ if msg.thermal.started:
+ for p in car_started_processes:
+ if p == "loggerd" and logger_dead:
+ kill_managed_process(p)
+ else:
+ start_managed_process(p)
+ else:
+ logger_dead = False
+ for p in reversed(car_started_processes):
+ kill_managed_process(p)
+
+ # check the status of all processes, did any of them die?
+ running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
+ #cloudlog.debug(' '.join(running_list))
+
+ # Exit main loop when uninstall is needed
+ if params.get("DoUninstall", encoding='utf8') == "1":
+ break
+
+ if os.getenv("GET_CPU_USAGE"):
+ dt = time.time() - start_t
+
+ # Get first sample
+ if dt > 30 and first_proc is None:
+ first_proc = messaging.recv_sock(proc_sock)
+
+ # Get last sample and exit
+ if dt > 90:
+ first_proc = first_proc
+ last_proc = messaging.recv_sock(proc_sock, wait=True)
+
+ cleanup_all_processes(None, None)
+ sys.exit(print_cpu_usage(first_proc, last_proc))
+
+def manager_prepare(spinner=None):
+
+ carSettings = CarSettings()
+ # build all processes
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ # Spinner has to start from 70 here
+ total = 100.0 if prebuilt else 50.0
+
+ for i, p in enumerate(managed_processes):
+ if spinner is not None:
+ spinText = carSettings.spinnerText
+ spinner.update(spinText % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
+ prepare_managed_process(p)
+
+def uninstall():
+ cloudlog.warning("uninstalling")
+ with open('/cache/recovery/command', 'w') as f:
+ f.write('--wipe_data\n')
+ # IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
+ android.reboot(reason="recovery")
+
+def main():
+ os.environ['PARAMS_PATH'] = PARAMS
+
+ # the flippening!
+ os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
+
+ # disable bluetooth
+ os.system('service call bluetooth_manager 8')
+
+ params = Params()
+ params.manager_start()
+
+ default_params = [
+ ("CommunityFeaturesToggle", "0"),
+ ("CompletedTrainingVersion", "0"),
+ ("IsMetric", "0"),
+ ("RecordFront", "0"),
+ ("HasAcceptedTerms", "0"),
+ ("HasCompletedSetup", "0"),
+ ("IsUploadRawEnabled", "1"),
+ ("IsLdwEnabled", "1"),
+ ("IsGeofenceEnabled", "-1"),
+ ("SpeedLimitOffset", "0"),
+ ("LongitudinalControl", "0"),
+ ("LimitSetSpeed", "0"),
+ ("LimitSetSpeedNeural", "0"),
+ ("LastUpdateTime", datetime.datetime.now().isoformat().encode('utf8')),
+ ("OpenpilotEnabledToggle", "1"),
+ ("LaneChangeEnabled", "1"),
+ ]
+
+ # set unset params
+ for k, v in default_params:
+ if params.get(k) is None:
+ params.put(k, v)
+
+ # is this chffrplus?
+ if os.getenv("PASSIVE") is not None:
+ params.put("Passive", str(int(os.getenv("PASSIVE"))))
+
+ if params.get("Passive") is None:
+ raise Exception("Passive must be set to continue")
+
+ if ANDROID:
+ update_apks()
+ manager_init()
+ manager_prepare(spinner)
+ spinner.close()
+
+ if os.getenv("PREPAREONLY") is not None:
+ return
+
+ # SystemExit on sigterm
+ signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
+
+ try:
+ manager_thread()
+ except SystemExit:
+ raise
+ except Exception:
+ traceback.print_exc()
+ crash.capture_exception()
+ print ("EXIT ON EXCEPTION")
+ finally:
+ cleanup_all_processes(None, None)
+
+ if params.get("DoUninstall", encoding='utf8') == "1":
+ uninstall()
+
+if __name__ == "__main__":
+ main()
+ # manual exit because we are forked
+ sys.exit(0)
diff --git a/selfdrive/ui/SConscript b/selfdrive/ui/SConscript
index e0c353b43ef3af..8b7b4a8290471e 100644
--- a/selfdrive/ui/SConscript
+++ b/selfdrive/ui/SConscript
@@ -1,8 +1,10 @@
-Import('env', 'arch', 'common', 'messaging', 'gpucommon', 'visionipc', 'cereal')
+Import('env', 'arch', 'is_tbp', 'common', 'messaging', 'gpucommon', 'visionipc', 'cereal')
src = ['ui.cc', 'paint.cc', 'sidebar.cc', '#phonelibs/nanovg/nanovg.c']
libs = [common, 'zmq', 'czmq', 'capnp', 'capnp_c', 'm', cereal, messaging, gpucommon, visionipc]
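+# TBP is treated as its own arch so the Android-only sound/EGL/gnustl libraries below are not linked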
+if is_tbp:
+ arch = "aarch64_TBP"
if arch == "aarch64":
src += ['sound.cc', 'slplay.c']
libs += ['EGL', 'GLESv3', 'gnustl_shared', 'log', 'utils', 'gui', 'hardware', 'ui', 'CB', 'gsl', 'adreno_utils', 'OpenSLES', 'cutils', 'uuid', 'OpenCL']
diff --git a/start_tbp.sh b/start_tbp.sh
new file mode 100755
index 00000000000000..c30c981f2fbb37
--- /dev/null
+++ b/start_tbp.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+cd /data/openpilot/selfdrive
+PASSIVE=0 NOSENSOR=1 ./manager.py
diff --git a/tbp_strt.sh b/tbp_strt.sh
new file mode 100755
index 00000000000000..e4ce9c86be76c4
--- /dev/null
+++ b/tbp_strt.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+export OMP_NUM_THREADS=1
+export MKL_NUM_THREADS=1
+export NUMEXPR_NUM_THREADS=1
+export OPENBLAS_NUM_THREADS=1
+export VECLIB_MAXIMUM_THREADS=1
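+# keep the numpy/BLAS/OpenMP thread pools at one thread each so they don't oversubscribe the CPU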
+
+if [ -z "$BASEDIR" ]; then
+ BASEDIR="/data/openpilot"
+fi
+
+if [ -z "$PASSIVE" ]; then
+ export PASSIVE="1"
+fi
+
+. /data/openpilot/selfdrive/car/tesla/readconfig.sh
+STAGING_ROOT="/data/safe_staging"
+
+
+function launch {
+ # Wifi scan
+ wpa_cli IFNAME=wlan0 SCAN
+
+  #BB: this block used to prevent the autoupdate; need to find another way
+ # # apply update
+ # if [ $do_auto_update == "True" ]; then
+ # if [ "$(git rev-parse HEAD)" != "$(git rev-parse @{u})" ]; then
+ # git reset --hard @{u} &&
+ # git clean -xdf &&
+
+ # # Touch all files on release2 after checkout to prevent rebuild
+ # BRANCH=$(git rev-parse --abbrev-ref HEAD)
+ # if [[ "$BRANCH" == "release2" ]]; then
+ # touch **
+ # fi
+
+ # Check to see if there's a valid overlay-based update available. Conditions
+ # are as follows:
+ #
+ # 1. The BASEDIR init file has to exist, with a newer modtime than anything in
+ # the BASEDIR Git repo. This checks for local development work or the user
+ # switching branches/forks, which should not be overwritten.
+ # 2. The FINALIZED consistent file has to exist, indicating there's an update
+ # that completed successfully and synced to disk.
+
+
+
+ if [ $do_auto_update == "True" ] && [ -f "${BASEDIR}/.overlay_init" ]; then
+ find ${BASEDIR}/.git -newer ${BASEDIR}/.overlay_init | grep -q '.' 2> /dev/null
+ if [ $? -eq 0 ]; then
+ echo "${BASEDIR} has been modified, skipping overlay update installation"
+ else
+ if [ -f "${STAGING_ROOT}/finalized/.overlay_consistent" ]; then
+ if [ ! -d /data/safe_staging/old_openpilot ]; then
+ echo "Valid overlay update found, installing"
+ LAUNCHER_LOCATION="${BASH_SOURCE[0]}"
+
+ mv $BASEDIR /data/safe_staging/old_openpilot
+ mv "${STAGING_ROOT}/finalized" $BASEDIR
+
+ # The mv changed our working directory to /data/safe_staging/old_openpilot
+ cd "${BASEDIR}"
+
+ echo "Restarting launch script ${LAUNCHER_LOCATION}"
+ exec "${LAUNCHER_LOCATION}"
+ else
+ echo "openpilot backup found, not updating"
+ # TODO: restore backup? This means the updater didn't start after swapping
+ fi
+ fi
+ fi
+ fi
+
+ # no cpu rationing for now
+ echo 0-3 > /dev/cpuset/background/cpus
+ echo 0-3 > /dev/cpuset/system-background/cpus
+ echo 0-3 > /dev/cpuset/foreground/boost/cpus
+ echo 0-3 > /dev/cpuset/foreground/cpus
+ echo 0-3 > /dev/cpuset/android/cpus
+
+ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+
+ # Remove old NEOS update file
+ # TODO: move this code to the updater
+ if [ -d /data/neoupdate ]; then
+ rm -rf /data/neoupdate
+ fi
+
+ # Check for NEOS update
+ if [ $(< /VERSION) != "14" ]; then
+ if [ -f "$DIR/scripts/continue.sh" ]; then
+ cp "$DIR/scripts/continue.sh" "/data/data/com.termux/files/continue.sh"
+ fi
+
+ "$DIR/installer/updater/updater" "file://$DIR/installer/updater/update.json"
+ fi
+
+
+ # handle pythonpath
+ ln -sfn $(pwd) /data/pythonpath
+ export PYTHONPATH="$PWD"
+
+ # start manager
+ cd selfdrive
+ ./tbp_manager.py
+
+ # if broken, keep on screen error
+ while true; do sleep 1; done
+}
+
+launch