From 89db482189e92f69c15eaf879a2a550c937fd35f Mon Sep 17 00:00:00 2001 From: skhshibo Date: Mon, 21 Feb 2022 12:32:50 +0200 Subject: [PATCH] - add build_raw_data to the debug_protocol API - add the new API to C# and Python wrappers - add python unit-test under unit_tests/debug_protocol/ - fix old tests/scenarios to use this new API - fix some formatting in python files to meet PEP8 coding style --- include/librealsense2/h/rs_device.h | 17 + include/librealsense2/hpp/rs_device.hpp | 25 ++ src/core/debug.h | 7 + src/ds5/ds5-device.cpp | 10 + src/ds5/ds5-device.h | 7 + src/hw-monitor.cpp | 11 + src/hw-monitor.h | 3 + src/ivcam/sr300.h | 11 + src/l500/l500-device.cpp | 9 + src/l500/l500-device.h | 6 + src/realsense.def | 1 + src/rs.cpp | 17 +- .../debug_protocol/test-build-raw-data.py | 53 +++ .../live/hw-errors/l500/l500-error-common.h | 8 +- unit-tests/live/options/test-drops-on-set.py | 5 +- .../test-l500-json-load.py | 42 ++- unit-tests/py/rspy/file.py | 79 ++-- unit-tests/py/rspy/libci.py | 255 ++++++------- unit-tests/py/rspy/repo.py | 46 +-- unit-tests/run-unit-tests.py | 353 +++++++++--------- unit-tests/syncer/sw.py | 118 +++--- unit-tests/syncer/test-ts-desync.py | 37 +- unit-tests/syncer/test-ts-diff-fps.py | 114 +++--- unit-tests/syncer/test-ts-eof.py | 97 ++--- unit-tests/syncer/test-ts-same-fps.py | 57 ++- unit-tests/test-fw-update.py | 187 +++++----- unit-tests/unit-test-config.py | 284 +++++++------- unit-tests/unit-tests-internal.cpp | 10 +- unit-tests/unit-tests-live.cpp | 10 +- .../Intel.RealSense/Devices/DebugDevice.cs | 33 ++ .../csharp/Intel.RealSense/NativeMethods.cs | 7 +- wrappers/python/pyrs_device.cpp | 4 +- 32 files changed, 1091 insertions(+), 832 deletions(-) create mode 100644 unit-tests/debug_protocol/test-build-raw-data.py diff --git a/include/librealsense2/h/rs_device.h b/include/librealsense2/h/rs_device.h index b1912789a1..ff29c3e277 100644 --- a/include/librealsense2/h/rs_device.h +++ b/include/librealsense2/h/rs_device.h @@ -80,6 
+80,23 @@ int rs2_supports_device_info(const rs2_device* device, rs2_camera_info info, rs2 */ void rs2_hardware_reset(const rs2_device * device, rs2_error ** error); +/** +* Build raw data command from opcode, parameters and data. +* The result can be used as raw_data_to_send parameter in send_and_receive_raw_data +* \param[in] device RealSense device to build the command for +* \param[in] opcode Command opcode +* \param[in] param1 First input parameter +* \param[in] param2 Second input parameter +* \param[in] param3 Third input parameter +* \param[in] param4 Fourth input parameter +* \param[in] data Input data (up to 1024 bytes) +* \param[in] size_of_data Size of input data in bytes +* \param[out] error If non-null, receives any error that occurs during this call, otherwise, errors are ignored +* \return rs2_raw_data_buffer which includes raw command +*/ +const rs2_raw_data_buffer* rs2_build_raw_data(rs2_device* device, unsigned opcode, unsigned param1, unsigned param2, + unsigned param3, unsigned param4, void* data, unsigned size_of_data, rs2_error** error); + /** * Send raw data to device * \param[in] device RealSense device to send data to diff --git a/include/librealsense2/hpp/rs_device.hpp b/include/librealsense2/hpp/rs_device.hpp index c6dbd641d3..5e781eb814 100644 --- a/include/librealsense2/hpp/rs_device.hpp +++ b/include/librealsense2/hpp/rs_device.hpp @@ -830,6 +830,31 @@ namespace rs2 error::handle(e); } + std::vector build_raw_data(const uint32_t opcode = 0, + const uint32_t param1 = 0, + const uint32_t param2 = 0, + const uint32_t param3 = 0, + const uint32_t param4 = 0, + const std::vector& data = std::vector()) const + { + std::vector results; + + rs2_error* e = nullptr; + auto buffer = rs2_build_raw_data(_dev.get(), opcode, param1, param2, param3, param4, + (void*)data.data(), (uint32_t)data.size(), &e); + std::shared_ptr list(buffer, rs2_delete_raw_data); + error::handle(e); + + auto size = rs2_get_raw_data_size(list.get(), &e); + error::handle(e); + + auto start =
rs2_get_raw_data(list.get(), &e); + + results.insert(results.begin(), start, start + size); + + return results; + } + std::vector send_and_receive_raw_data(const std::vector& input) const { std::vector results; diff --git a/src/core/debug.h b/src/core/debug.h index f3060dac46..cc433dc149 100644 --- a/src/core/debug.h +++ b/src/core/debug.h @@ -12,6 +12,13 @@ namespace librealsense { public: virtual std::vector send_receive_raw_data(const std::vector& input) = 0; + virtual std::vector build_raw_data(const uint32_t opcode, + const uint32_t param1 = 0, + const uint32_t param2 = 0, + const uint32_t param3 = 0, + const uint32_t param4 = 0, + const std::vector& data = std::vector()) = 0; + }; MAP_EXTENSION(RS2_EXTENSION_DEBUG, librealsense::debug_interface); diff --git a/src/ds5/ds5-device.cpp b/src/ds5/ds5-device.cpp index fbcce72477..8f6dafdf14 100644 --- a/src/ds5/ds5-device.cpp +++ b/src/ds5/ds5-device.cpp @@ -122,6 +122,16 @@ namespace librealsense { return _hw_monitor->send(input); } + + std::vector ds5_device::build_raw_data(const uint32_t opcode, + const uint32_t param1, + const uint32_t param2, + const uint32_t param3, + const uint32_t param4, + const std::vector& data) + { + return _hw_monitor->build_raw_data(opcode, param1, param2, param3, param4, data); + } void ds5_device::hardware_reset() { diff --git a/src/ds5/ds5-device.h b/src/ds5/ds5-device.h index 57b341f1c7..873c077d31 100644 --- a/src/ds5/ds5-device.h +++ b/src/ds5/ds5-device.h @@ -55,6 +55,13 @@ namespace librealsense std::vector send_receive_raw_data(const std::vector& input) override; + std::vector build_raw_data(const uint32_t opcode, + const uint32_t param1 = 0, + const uint32_t param2 = 0, + const uint32_t param3 = 0, + const uint32_t param4 = 0, + const std::vector& data = std::vector()) override; + void hardware_reset() override; void create_snapshot(std::shared_ptr& snapshot) const override; diff --git a/src/hw-monitor.cpp b/src/hw-monitor.cpp index 3478160f46..25ec8dc8ef 100644 --- 
a/src/hw-monitor.cpp +++ b/src/hw-monitor.cpp @@ -174,6 +174,17 @@ namespace librealsense newCommand.receivedCommandData + newCommand.receivedCommandDataLength); } + std::vector hw_monitor::build_raw_data(const uint32_t opcode, const uint32_t param1, const uint32_t param2, + const uint32_t param3, const uint32_t param4, const std::vector& data) const + { + int length; + std::vector result; + result.resize(IVCAM_MONITOR_MAX_BUFFER_SIZE); + fill_usb_buffer(opcode, param1, param2, param3, param4, data.data(), (uint32_t)data.size(), result.data(), length); + result.resize(length); + return result; + } + std::string hwmon_error_string( command const & cmd, hwmon_response e ) { auto str = hwmon_error2str( e ); diff --git a/src/hw-monitor.h b/src/hw-monitor.h index eb6493c061..4fd30328b1 100644 --- a/src/hw-monitor.h +++ b/src/hw-monitor.h @@ -332,6 +332,9 @@ namespace librealsense std::vector< uint8_t > send( std::vector< uint8_t > const & data ) const; std::vector send( command cmd, hwmon_response * = nullptr, bool locked_transfer = false ) const; + std::vector build_raw_data(const uint32_t opcode, const uint32_t param1 = 0, const uint32_t param2 = 0, + const uint32_t param3 = 0, const uint32_t param4 = 0, const std::vector& data = std::vector()) const; + void get_gvd(size_t sz, unsigned char* gvd, uint8_t gvd_cmd) const; static std::string get_firmware_version_string(const std::vector& buff, size_t index, size_t length = 4); static std::string get_module_serial_string(const std::vector& buff, size_t index, size_t length = 6); diff --git a/src/ivcam/sr300.h b/src/ivcam/sr300.h index 0ac054e015..214ac9b26c 100644 --- a/src/ivcam/sr300.h +++ b/src/ivcam/sr300.h @@ -341,6 +341,17 @@ namespace librealsense return _hw_monitor->send(input); } + + std::vector build_raw_data(const uint32_t opcode, + const uint32_t param1 = 0, + const uint32_t param2 = 0, + const uint32_t param3 = 0, + const uint32_t param4 = 0, + const std::vector& data = std::vector()) + { + return 
_hw_monitor->build_raw_data(opcode, param1, param2, param3, param4, data); + } + void hardware_reset() override { force_hardware_reset(); diff --git a/src/l500/l500-device.cpp b/src/l500/l500-device.cpp index a3df320a6e..b51c087fc1 100644 --- a/src/l500/l500-device.cpp +++ b/src/l500/l500-device.cpp @@ -587,6 +587,15 @@ namespace librealsense return _hw_monitor->send(input); } + std::vector l500_device::build_raw_data(const uint32_t opcode, + const uint32_t param1, + const uint32_t param2, + const uint32_t param3, + const uint32_t param4, + const std::vector& data) + { + return _hw_monitor->build_raw_data(opcode, param1, param2, param3, param4, data); + } ivcam2::extended_temperatures l500_device::get_temperatures() const { diff --git a/src/l500/l500-device.h b/src/l500/l500-device.h index 0be0f60a09..c57e913895 100644 --- a/src/l500/l500-device.h +++ b/src/l500/l500-device.h @@ -48,6 +48,12 @@ namespace librealsense } std::vector< uint8_t > send_receive_raw_data(const std::vector< uint8_t > & input) override; + std::vector build_raw_data(const uint32_t opcode, + const uint32_t param1 = 0, + const uint32_t param2 = 0, + const uint32_t param3 = 0, + const uint32_t param4 = 0, + const std::vector& data = std::vector()) override; void hardware_reset() override { diff --git a/src/realsense.def b/src/realsense.def index 1874bf376a..84edbd2bf5 100644 --- a/src/realsense.def +++ b/src/realsense.def @@ -101,6 +101,7 @@ EXPORTS rs2_set_region_of_interest rs2_get_region_of_interest + rs2_build_raw_data rs2_send_and_receive_raw_data rs2_get_raw_data_size rs2_delete_raw_data diff --git a/src/rs.cpp b/src/rs.cpp index b343018653..69ae146b86 100644 --- a/src/rs.cpp +++ b/src/rs.cpp @@ -557,6 +557,21 @@ rs2_stream_profile* rs2_clone_video_stream_profile(const rs2_stream_profile* mod } HANDLE_EXCEPTIONS_AND_RETURN(nullptr, mode, stream, index, format, width, height, intr) +const rs2_raw_data_buffer* rs2_build_raw_data(rs2_device* device, unsigned opcode, unsigned param1,
unsigned param2, + unsigned param3, unsigned param4, void* data, unsigned size_of_data, rs2_error** error) BEGIN_API_CALL +{ + VALIDATE_NOT_NULL(device); + + auto debug_interface = VALIDATE_INTERFACE(device->device, librealsense::debug_interface); + + auto raw_data_buffer = static_cast(data); + std::vector buffer_to_send(raw_data_buffer, raw_data_buffer + size_of_data); + auto ret_data = debug_interface->build_raw_data(opcode, param1, param2, param3, param4, buffer_to_send); + return new rs2_raw_data_buffer{ ret_data }; + +} +HANDLE_EXCEPTIONS_AND_RETURN(nullptr, device) + const rs2_raw_data_buffer* rs2_send_and_receive_raw_data(rs2_device* device, void* raw_data_to_send, unsigned size_of_raw_data_to_send, rs2_error** error) BEGIN_API_CALL { VALIDATE_NOT_NULL(device); @@ -1778,7 +1793,7 @@ void rs2_synthetic_frame_ready(rs2_source* source, rs2_frame* frame, rs2_error** HANDLE_EXCEPTIONS_AND_RETURN(, source, frame) rs2_pipeline* rs2_create_pipeline(rs2_context* ctx, rs2_error ** error) BEGIN_API_CALL -{ +{ VALIDATE_NOT_NULL(ctx); auto pipe = std::make_shared(ctx->ctx); diff --git a/unit-tests/debug_protocol/test-build-raw-data.py b/unit-tests/debug_protocol/test-build-raw-data.py new file mode 100644 index 0000000000..e8491cf5bd --- /dev/null +++ b/unit-tests/debug_protocol/test-build-raw-data.py @@ -0,0 +1,53 @@ +import pyrealsense2 as rs +from rspy import devices, log, test, file, repo + + +def convert_bytes_to_decimal(command): + command_input = [] # array of uint8_t + + # Parsing the command to array of unsigned integers (size should be < 8 bits) + # throwing out spaces + command = command.lower() + command = command.split() + + for byte in command: + command_input.append(int('0x' + byte, 0)) + + return command_input + + +def send_hardware_monitor_command(device, command): + raw_result = rs.debug_protocol(device).send_and_receive_raw_data(command) + return raw_result[4:] + + +test.start("Init") + +try: + ctx = rs.context() + dev = ctx.query_devices()[0] + +
print("======================= old scenario ==========================\n") + gvd_command = "14 00 ab cd 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + converted_cmd = convert_bytes_to_decimal(gvd_command) + old_result = send_hardware_monitor_command(dev, converted_cmd) + print("old_result", old_result) + + print("\n") + + print("======================= NEW USAGE ==========================") + gvd_opcode = 0x10 + new_command = rs.debug_protocol(dev).build_raw_data(gvd_opcode) + new_result = send_hardware_monitor_command(dev, new_command) + print("new_result", new_result) + + print("\n") + + equal = test.check_equal_lists(old_result, new_result) + if not equal: + log.w("expected results are not equal.") +except: + test.unexpected_exception() + +test.finish() +test.print_results_and_exit() diff --git a/unit-tests/live/hw-errors/l500/l500-error-common.h b/unit-tests/live/hw-errors/l500/l500-error-common.h index bdd0ab3384..ff29e802f2 100644 --- a/unit-tests/live/hw-errors/l500/l500-error-common.h +++ b/unit-tests/live/hw-errors/l500/l500-error-common.h @@ -35,17 +35,11 @@ std::map< uint8_t, std::pair< std::string, rs2_log_severity > > build_log_errors void trigger_error_or_exit( const rs2::device & dev, uint8_t num ) { - std::vector< uint8_t > raw_data( 24, 0 ); - raw_data[0] = 0x14; - raw_data[2] = 0xab; - raw_data[3] = 0xcd; - raw_data[4] = l500_trigger_error_opcode; - raw_data[12] = num; - if( auto debug = dev.as< debug_protocol >() ) { try { + auto raw_data = debug.build_raw_data(l500_trigger_error_opcode, 0, num); debug.send_and_receive_raw_data( raw_data ); } catch(std::exception const& e) diff --git a/unit-tests/live/options/test-drops-on-set.py b/unit-tests/live/options/test-drops-on-set.py index 1df693eea7..7902f01262 100644 --- a/unit-tests/live/options/test-drops-on-set.py +++ b/unit-tests/live/options/test-drops-on-set.py @@ -5,8 +5,7 @@ import platform import pyrealsense2 as rs -from rspy import test -from rspy import log +from rspy import 
test, log import time dev = test.find_first_device_or_exit() @@ -29,6 +28,7 @@ def get_allowed_drops(): # Our KPI is to prevent sequential frame drops, therefore single frame drop is allowed. return 1 + def set_new_value(sensor, option, value): global after_set_option after_set_option = True @@ -101,6 +101,7 @@ def check_color_frame_drops(frame): if product_line == "D400": options_to_ignore = [rs.option.visual_preset, rs.option.inter_cam_sync_mode] + def test_option_changes(sensor): global options_to_ignore options = sensor.get_supported_options() diff --git a/unit-tests/live/serializable-device/test-l500-json-load.py b/unit-tests/live/serializable-device/test-l500-json-load.py index 0a8a591133..e4183983ba 100644 --- a/unit-tests/live/serializable-device/test-l500-json-load.py +++ b/unit-tests/live/serializable-device/test-l500-json-load.py @@ -1,7 +1,7 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2020 Intel Corporation. All Rights Reserved. -#test:device L500* +# test:device L500* import pyrealsense2 as rs from rspy import test, log @@ -14,7 +14,8 @@ visual_preset_number = depth_sensor.get_option(rs.option.visual_preset) visual_preset_name = rs.l500_visual_preset(int(visual_preset_number)) -def json_to_dict( json ): + +def json_to_dict(json): """ :param json: a string representing a json file :return: a dictionary with all settings @@ -22,36 +23,39 @@ def json_to_dict( json ): translation_table = dict.fromkeys(map(ord, '",\''), None) json_dict = {} for line in json.splitlines(): - if ':' not in line: # ignoring lines that are not for settings such as empty lines + if ':' not in line: # ignoring lines that are not for settings such as empty lines continue setting, value = line.split(':') setting = setting.strip().translate(translation_table) value = value.strip().translate(translation_table) - json_dict[ setting ] = value + json_dict[setting] = value return json_dict -def log_settings_differences( data ): + +def 
log_settings_differences(data): global depth_sensor, sd depth_sensor.set_option(rs.option.visual_preset, int(rs.l500_visual_preset.low_ambient_light)) - actual_data = str( sd.serialize_json() ) - data_dict = json_to_dict( data ) - actual_data_dict = json_to_dict( actual_data ) + actual_data = str(sd.serialize_json()) + data_dict = json_to_dict(data) + actual_data_dict = json_to_dict(actual_data) log.debug_indent() try: # logging the differences in the settings between the expected and the actual values for key in actual_data_dict.keys(): if key not in data_dict: - log.d( "New setting added to json:", key) + log.d("New setting added to json:", key) elif "Visual Preset" in key or "Temperature" in key or "temperature" in key: - # the line regarding the visual preset will always be different because we load 1 from data but set it to - # 3 for low ambient. Also all lines regarding temperatures depend on the camera and don't affect the preset + # the line regarding the visual preset will always be different because + # we load 1 from data but set it to 3 for low ambient. 
Also, all lines regarding + # temperatures depend on the camera and don't affect the preset continue - elif data_dict[ key ] != actual_data_dict[ key ]: - log.d( key, "was expected to have value of", data_dict[ key ], - "but actually had value of", actual_data_dict[ key ]) + elif data_dict[key] != actual_data_dict[key]: + log.d(key, "was expected to have value of", data_dict[key], + "but actually had value of", actual_data_dict[key]) finally: log.debug_unindent() + ############################################################################################# # This test checks backward compatibility to old json files that saved with default preset # The default preset is deprecated but json files that saved with default preset @@ -99,15 +103,15 @@ def log_settings_differences( data ): test.start("Trying to load settings with default preset from json") try: - sd.load_json( low_ambient_data_with_default_preset ) + sd.load_json(low_ambient_data_with_default_preset) visual_preset_number = depth_sensor.get_option(rs.option.visual_preset) visual_preset_name = rs.l500_visual_preset(int(visual_preset_number)) # if this check fails it is most likely because FW changed the default settings equal = test.check_equal(visual_preset_name, rs.l500_visual_preset.low_ambient_light) if not equal: - log.w( "It is possible that FW changed the default settings of the camera." 
) - log_settings_differences( low_ambient_data_with_default_preset ) + log.w("It is possible that FW changed the default settings of the camera.") + log_settings_differences(low_ambient_data_with_default_preset) except: test.unexpected_exception() test.finish() @@ -153,7 +157,7 @@ def log_settings_differences( data ): test.start("Trying to load wrong settings, should get custom preset") try: - sd.load_json( wrong_data_with_default_preset ) + sd.load_json(wrong_data_with_default_preset) visual_preset_number = depth_sensor.get_option(rs.option.visual_preset) visual_preset_name = rs.l500_visual_preset(int(visual_preset_number)) @@ -204,7 +208,7 @@ def log_settings_differences( data ): test.start("Trying to load wrong settings with specified preset") try: - sd.load_json( wrong_data_with_low_ambient_preset ) + sd.load_json(wrong_data_with_low_ambient_preset) visual_preset_number = depth_sensor.get_option(rs.option.visual_preset) visual_preset_name = rs.l500_visual_preset(int(visual_preset_number)) diff --git a/unit-tests/py/rspy/file.py b/unit-tests/py/rspy/file.py index 96b2fb42a4..f0d2bb35d9 100644 --- a/unit-tests/py/rspy/file.py +++ b/unit-tests/py/rspy/file.py @@ -1,30 +1,31 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. -import os, re, platform, subprocess, sys - +import os +import re +import platform from rspy import log # get os and directories for future use # NOTE: WSL will read as 'Linux' but the build is Windows-based! 
system = platform.system() -if system == 'Linux' and "microsoft" not in platform.uname()[3].lower(): +if system == 'Linux' and "microsoft" not in platform.uname()[3].lower(): linux = True else: linux = False -def inside_dir( root ): +def inside_dir(root): """ Yield all files found in root, using relative names ('root/a' would be yielded as 'a') """ - for (path,subdirs,leafs) in os.walk( root ): + for (path, subdirs, leafs) in os.walk(root): for leaf in leafs: # We have to stick to Unix conventions because CMake on Windows is fubar... - yield os.path.relpath( path + '/' + leaf, root ).replace( '\\', '/' ) + yield os.path.relpath(path + '/' + leaf, root).replace('\\', '/') -def is_inside( file, directory ): +def is_inside(file, directory): """ :param file: The file/directory we're checking :param directory: The parent directory @@ -33,26 +34,27 @@ def is_inside( file, directory ): NOTE: A directory is considered inside itself! is_inside( dir, dir ) is True """ - directory = os.path.join( os.path.realpath( directory ), '' ) - file = os.path.realpath( file ) + directory = os.path.join(os.path.realpath(directory), '') + file = os.path.realpath(file) # Return True if the common prefix of both is equal to directory # E.g. 
/a/b/c/d.rst and directory is /a/b, the common prefix is /a/b - common = os.path.commonprefix( [file, directory] ) - return common == directory or os.path.join( common, '' ) == directory + common = os.path.commonprefix([file, directory]) + return common == directory or os.path.join(common, '') == directory -def find( dir, mask ): +def find(dir, mask): """ Yield all files in given directory (including sub-directories) that fit the given mask :param dir: directory in which to search :param mask: mask to compare file names to """ - pattern = re.compile( mask ) - for leaf in inside_dir( dir ): - if pattern.search( leaf ): + pattern = re.compile(mask) + for leaf in inside_dir(dir): + if pattern.search(leaf): yield leaf + def is_executable(path_to_file): """ :param path_to_file: path to a file @@ -64,13 +66,15 @@ def is_executable(path_to_file): else: return path_to_file.endswith('.exe') -def remove_newlines (lines): + +def remove_newlines(lines): for line in lines: if line[-1] == '\n': - line = line[:-1] # excluding the endline + line = line[:-1] # excluding the endline yield line -def _grep( pattern, lines, context ): + +def _grep(pattern, lines, context): """ helper function for grep """ @@ -78,10 +82,10 @@ def _grep( pattern, lines, context ): matches = 0 for line in lines: index = index + 1 - match = pattern.search( line ) + match = pattern.search(line) if match: context['index'] = index - context['line'] = line + context['line'] = line context['match'] = match yield context matches += 1 @@ -90,22 +94,24 @@ def _grep( pattern, lines, context ): del context['line'] del context['match'] -def grep( expr, *args ): - pattern = re.compile( expr ) + +def grep(expr, *args): + pattern = re.compile(expr) context = dict() for filename in args: context['filename'] = filename - with open( filename, errors = 'ignore' ) as file: - for line in _grep( pattern, remove_newlines( file ), context ): + with open(filename, errors='ignore') as file: + for line in _grep(pattern, 
remove_newlines(file), context): yield line -def cat( filename ): - with open( filename, errors = 'ignore' ) as file: - for line in remove_newlines( file ): - log.out( line ) +def cat(filename): + with open(filename, errors='ignore') as file: + for line in remove_newlines(file): + log.out(line) -def split_comments( filename, comment_delim_regex = '#' ): + +def split_comments(filename, comment_delim_regex='#'): """ Yields all lines in a file, but with comments separated: ' line' yields (' line', None ) @@ -113,13 +119,10 @@ def split_comments( filename, comment_delim_regex = '#' ): '# comment line ' yields ('', 'comment line') """ context = dict() - pattern = re.compile( r'^(.*?)(?:\s*' + comment_delim_regex + r'\s*(.*?)\s*)?$' ) # to end-of-line - with open( filename, errors = 'ignore' ) as file: - for line in remove_newlines( file ): - match = pattern.search( line ) + pattern = re.compile(r'^(.*?)(?:\s*' + comment_delim_regex + r'\s*(.*?)\s*)?$') # to end-of-line + with open(filename, errors='ignore') as file: + for line in remove_newlines(file): + match = pattern.search(line) line_without_comment = match.group(1) - comment = match.group(2) # can be None - yield (line_without_comment, comment) - - - + comment = match.group(2) # can be None + yield line_without_comment, comment diff --git a/unit-tests/py/rspy/libci.py b/unit-tests/py/rspy/libci.py index 3ac29a9ffc..a18583c0d7 100644 --- a/unit-tests/py/rspy/libci.py +++ b/unit-tests/py/rspy/libci.py @@ -1,13 +1,17 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. 
-import re, os, subprocess, time, sys, platform +import re +import os +import subprocess +import time +import sys +import platform from abc import ABC, abstractmethod - from rspy import log, file # this script is in unit-test/py/rspy -unit_tests_dir = os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath( __file__ ) ) ) ) +unit_tests_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # the full path to the directory that should hold the unit-tests logs. It is updated in run-unit-tests when we know # the target directory. If None we assume the output should go to stdout logdir = None @@ -20,8 +24,8 @@ home = '/usr/local/lib/ci' else: home = 'C:\\LibCI' -if not os.path.isdir( home ): - home = os.path.normpath( os.path.expanduser( '~/LibCI' )) +if not os.path.isdir(home): + home = os.path.normpath(os.path.expanduser('~/LibCI')) # # Configuration (git config format) is kept in this file: configfile = home + os.sep + 'configfile' @@ -30,7 +34,7 @@ exceptionsfile = home + os.sep + 'exceptions.specs' -def run( cmd, stdout = None, timeout = 200, append = False ): +def run(cmd, stdout=None, timeout=200, append=False): """ Wrapper function for subprocess.run. If the child process times out or ends with a non-zero exit status an exception is raised! 
@@ -42,46 +46,46 @@ def run( cmd, stdout = None, timeout = 200, append = False ): overwriting it :return: the output written by the child, if stdout is None -- otherwise N/A """ - log.d( 'running:', cmd ) + log.d('running:', cmd) handle = None start_time = time.time() try: log.debug_indent() if stdout and stdout != subprocess.PIPE: if append: - handle = open( stdout, "a" ) + handle = open(stdout, "a") handle.write( - "\n----------TEST-SEPARATOR----------\n\n" ) + "\n----------TEST-SEPARATOR----------\n\n") handle.flush() else: - handle = open( stdout, "w" ) + handle = open(stdout, "w") stdout = handle - rv = subprocess.run( cmd, - stdout=stdout, - stderr=subprocess.STDOUT, - universal_newlines=True, - timeout=timeout, - check=True ) + rv = subprocess.run(cmd, + stdout=stdout, + stderr=subprocess.STDOUT, + universal_newlines=True, + timeout=timeout, + check=True) result = rv.stdout if not result: result = [] else: - result = result.split( '\n' ) + result = result.split('\n') return result finally: if handle: handle.close() log.debug_unindent() run_time = time.time() - start_time - log.d( "test took", run_time, "seconds" ) + log.d("test took", run_time, "seconds") -class TestConfig( ABC ): # Abstract Base Class +class TestConfig(ABC): # Abstract Base Class """ Configuration for a test, encompassing any metadata needed to control its run, like retries etc. 
""" - def __init__( self, context ): + def __init__(self, context): self._configurations = list() self._priority = 1000 self._tags = set() @@ -90,50 +94,51 @@ def __init__( self, context ): self._context = context self._donotrun = False - def debug_dump( self ): + def debug_dump(self): if self._donotrun: - log.d( 'THIS TEST WILL BE SKIPPED (donotrun specified)' ) + log.d('THIS TEST WILL BE SKIPPED (donotrun specified)') if self._priority != 1000: - log.d( 'priority:', self._priority ) + log.d('priority:', self._priority) if self._timeout != 200: - log.d( 'timeout:', self._timeout ) - if len( self._tags ) > 1: - log.d( 'tags:', { tag for tag in self._tags if tag != "exe" and tag != "py" } ) + log.d('timeout:', self._timeout) + if len(self._tags) > 1: + log.d('tags:', {tag for tag in self._tags if tag != "exe" and tag != "py"}) if self._flags: - log.d( 'flags:', self._flags ) - if len( self._configurations ) > 1: - log.d( len( self._configurations ), 'configurations' ) + log.d('flags:', self._flags) + if len(self._configurations) > 1: + log.d(len(self._configurations), 'configurations') # don't show them... they are output separately @property - def configurations( self ): + def configurations(self): return self._configurations @property - def priority( self ): + def priority(self): return self._priority @property - def timeout( self ): + def timeout(self): return self._timeout @property - def tags( self ): + def tags(self): return self._tags @property - def flags( self ): + def flags(self): return self._flags @property - def context( self ): + def context(self): return self._context @property - def donotrun( self ): - return self._donotrun + def donotrun(self): + return self._donotrun + -class TestConfigFromText( TestConfig ): +class TestConfigFromText(TestConfig): """ Configuration for a test -- from any text-based syntax with a given prefix, e.g. for python: #test:usb2 @@ -144,36 +149,36 @@ class TestConfigFromText( TestConfig ): //#test:... 
""" - def __init__( self, source, line_prefix, context ): + def __init__(self, source, line_prefix, context): """ :param source: The absolute path to the text file :param line_prefix: A regex to denote a directive (must be first thing in a line), which will be immediately followed by the directive itself and optional arguments :param context: context in which to configure the test """ - TestConfig.__init__( self, context ) + TestConfig.__init__(self, context) - self.derive_config_from_text( source, line_prefix ) - self.derive_tags_from_path( source ) + self.derive_config_from_text(source, line_prefix) + self.derive_tags_from_path(source) - def derive_config_from_text( self, source, line_prefix ): + def derive_config_from_text(self, source, line_prefix): # Configuration is made up of directives: # #test:[:[!]] * # If a context is not specified, the directive always applies. Any directive with a context # will only get applied if we're running under the context it specifies (! means not, so # !nightly means when not under nightly). - regex = r'^' + line_prefix - regex += r'([^\s:]+)' # 1: directive - regex += r'(?::(\S+))?' # 2: optional context - regex += r'((?:\s+\S+)*?)' # 3: params + regex = r'^' + line_prefix + regex += r'([^\s:]+)' # 1: directive + regex += r'(?::(\S+))?' 
# 2: optional context + regex += r'((?:\s+\S+)*?)' # 3: params regex += r'\s*(?:#\s*(.*))?$' # 4: optional comment - for line in file.grep( regex, source ): + for line in file.grep(regex, source): match = line['match'] - directive = match.group( 1 ) - directive_context = match.group( 2 ) - text_params = match.group( 3 ).strip() + directive = match.group(1) + directive_context = match.group(2) + text_params = match.group(3).strip() params = [s for s in text_params.split()] - comment = match.group( 4 ) + comment = match.group(4) if directive_context: not_context = directive_context.startswith('!') if not_context: @@ -191,89 +196,89 @@ def derive_config_from_text( self, source, line_prefix ): if directive == 'device': # log.d( ' configuration:', params ) if not params: - log.e( source + '+' + str( line['index'] ) + ': device directive with no devices listed' ) - elif 'each' in text_params.lower() and len( params ) > 1: - log.e( source + '+' + str( - line['index'] ) + ': each() cannot be used in combination with other specs', params ) - elif 'each' in text_params.lower() and not re.fullmatch( r'each\(.+\)', text_params, re.IGNORECASE ): - log.e( source + '+' + str( line['index'] ) + ': invalid \'each\' syntax:', params ) + log.e(source + '+' + str(line['index']) + ': device directive with no devices listed') + elif 'each' in text_params.lower() and len(params) > 1: + log.e(source + '+' + str( + line['index']) + ': each() cannot be used in combination with other specs', params) + elif 'each' in text_params.lower() and not re.fullmatch(r'each\(.+\)', text_params, re.IGNORECASE): + log.e(source + '+' + str(line['index']) + ': invalid \'each\' syntax:', params) else: - self._configurations.append( params ) + self._configurations.append(params) elif directive == 'priority': - if len( params ) == 1 and params[0].isdigit(): - self._priority = int( params[0] ) + if len(params) == 1 and params[0].isdigit(): + self._priority = int(params[0]) else: - log.e( source + '+' + str( 
line['index'] ) + ': priority directive with invalid parameters:', - params ) + log.e(source + '+' + str(line['index']) + ': priority directive with invalid parameters:', + params) elif directive == 'timeout': - if len( params ) == 1 and params[0].isdigit(): - self._timeout = int( params[0] ) + if len(params) == 1 and params[0].isdigit(): + self._timeout = int(params[0]) else: - log.e( source + '+' + str( line['index'] ) + ': timeout directive with invalid parameters:', - params ) + log.e(source + '+' + str(line['index']) + ': timeout directive with invalid parameters:', + params) elif directive == 'tag': - self._tags.update( map( str.lower, params )) # tags are case-insensitive + self._tags.update(map(str.lower, params)) # tags are case-insensitive elif directive == 'flag': - self._flags.update( params ) + self._flags.update(params) elif directive == 'donotrun': if params: - log.e( source + '+' + str( line['index'] ) + ': donotrun directive should not have parameters:', - params ) + log.e(source + '+' + str(line['index']) + ': donotrun directive should not have parameters:', + params) self._donotrun = True else: - log.e( source + '+' + str( line['index'] ) + ': invalid directive "' + directive + '"; ignoring' ) + log.e(source + '+' + str(line['index']) + ': invalid directive "' + directive + '"; ignoring') - def derive_tags_from_path( self, source ): + def derive_tags_from_path(self, source): # we need the relative path starting at the unit-tests directory - relative_path = re.split( r"[/\\]unit-tests[/\\]", source )[-1] - sub_dirs = re.split( r"[/\\]", relative_path )[:-1] # last element will be the name of the test - self._tags.update( sub_dirs ) + relative_path = re.split(r"[/\\]unit-tests[/\\]", source)[-1] + sub_dirs = re.split(r"[/\\]", relative_path)[:-1] # last element will be the name of the test + self._tags.update(sub_dirs) -class TestConfigFromCpp( TestConfigFromText ): - def __init__( self, source, context ): - TestConfigFromText.__init__( self, 
source, r'//#\s*test:', context ) - self._tags.add( 'exe' ) +class TestConfigFromCpp(TestConfigFromText): + def __init__(self, source, context): + TestConfigFromText.__init__(self, source, r'//#\s*test:', context) + self._tags.add('exe') -class TestConfigFromPy( TestConfigFromText ): - def __init__( self, source, context ): - TestConfigFromText.__init__( self, source, r'#\s*test:', context ) - self._tags.add( 'py' ) +class TestConfigFromPy(TestConfigFromText): + def __init__(self, source, context): + TestConfigFromText.__init__(self, source, r'#\s*test:', context) + self._tags.add('py') -class Test( ABC ): # Abstract Base Class +class Test(ABC): # Abstract Base Class """ Abstract class for a test. Holds the name of the test """ - def __init__( self, testname ): + def __init__(self, testname): # log.d( 'found', testname ) self._name = testname self._config = None self._ran = False @abstractmethod - def run_test( self, configuration = None, log_path = None, opts = set() ): + def run_test(self, configuration=None, log_path=None, opts=set()): pass - def debug_dump( self ): + def debug_dump(self): if self._config: self._config.debug_dump() @property - def config( self ): + def config(self): return self._config @property - def name( self ): + def name(self): return self._name @property - def ran( self ): + def ran(self): return self._ran - def get_log( self ): + def get_log(self): global logdir if not logdir: path = None @@ -281,13 +286,13 @@ def get_log( self ): path = logdir + os.sep + self.name + ".log" return path - def is_live( self ): + def is_live(self): """ Returns True if the test configurations specify devices (test has a 'device' directive) """ - return self._config and len( self._config.configurations ) > 0 + return self._config and len(self._config.configurations) > 0 - def find_source_path( self ): + def find_source_path(self): """ :return: The relative path from unit-tests directory to the test's source file (cpp or py). 
If the source file is not found None will be returned @@ -301,7 +306,7 @@ def find_source_path( self ): # test-func.cpp will not be found! global unit_tests_dir - split_testname = self.name.split( '-' ) + split_testname = self.name.split('-') path = unit_tests_dir relative_path = "" found_test_dir = False @@ -309,53 +314,53 @@ def find_source_path( self ): while not found_test_dir: # index 0 should be 'test' as tests always start with it found_test_dir = True - for i in range( 2, - len( split_testname ) ): # Checking if the next part of the test name is a sub-directory - possible_sub_dir = '-'.join( split_testname[1:i] ) # The next sub-directory could have several words + for i in range(2, + len(split_testname)): # Checking if the next part of the test name is a sub-directory + possible_sub_dir = '-'.join(split_testname[1:i]) # The next sub-directory could have several words sub_dir_path = path + os.sep + possible_sub_dir - if os.path.isdir( sub_dir_path ): + if os.path.isdir(sub_dir_path): path = sub_dir_path relative_path += possible_sub_dir + os.sep del split_testname[1:i] found_test_dir = False break - path += os.sep + '-'.join( split_testname ) - relative_path += '-'.join( split_testname ) - if os.path.isfile( path + ".cpp" ): + path += os.sep + '-'.join(split_testname) + relative_path += '-'.join(split_testname) + if os.path.isfile(path + ".cpp"): relative_path += ".cpp" - elif os.path.isfile( path + ".py" ): + elif os.path.isfile(path + ".py"): relative_path += ".py" else: - log.w( log.red + self.name + log.reset + ':', - 'No matching .cpp or .py file was found; no configuration will be used!' ) + log.w(log.red + self.name + log.reset + ':', + 'No matching .cpp or .py file was found; no configuration will be used!') return None return relative_path -class PyTest( Test ): +class PyTest(Test): """ Class for python tests. 
Hold the path to the script of the test """ - def __init__( self, testname, path_to_test, context = None ): + def __init__(self, testname, path_to_test, context=None): """ :param testname: name of the test :param path_to_test: the relative path from the current directory to the path :param context: context in which the test will run """ global unit_tests_dir - Test.__init__( self, testname ) + Test.__init__(self, testname) self.path_to_script = unit_tests_dir + os.sep + path_to_test - self._config = TestConfigFromPy( self.path_to_script, context ) + self._config = TestConfigFromPy(self.path_to_script, context) - def debug_dump( self ): - log.d( 'script:', self.path_to_script ) - Test.debug_dump( self ) + def debug_dump(self): + log.d('script:', self.path_to_script) + Test.debug_dump(self) @property - def command( self ): + def command(self): cmd = [sys.executable] # # PYTHON FLAGS @@ -385,41 +390,41 @@ def command( self ): cmd += ['--context', self.config.context] return cmd - def run_test( self, configuration = None, log_path = None, opts = set() ): + def run_test(self, configuration=None, log_path=None, opts=set()): try: cmd = self.command if opts: cmd += [opt for opt in opts] - run( cmd, stdout=log_path, append=self.ran, timeout=self.config.timeout ) + run(cmd, stdout=log_path, append=self.ran, timeout=self.config.timeout) finally: self._ran = True -class ExeTest( Test ): +class ExeTest(Test): """ Class for c/cpp tests. 
Hold the path to the executable for the test """ - def __init__( self, testname, exe = None, context = None ): + def __init__(self, testname, exe=None, context=None): """ :param testname: name of the test :param exe: full path to executable :param context: context in which the test will run """ global unit_tests_dir - if exe and not os.path.isfile( exe ): - log.d( "Tried to create exe test with invalid exe file: " + exe ) - Test.__init__( self, testname ) + if exe and not os.path.isfile(exe): + log.d("Tried to create exe test with invalid exe file: " + exe) + Test.__init__(self, testname) self.exe = exe relative_test_path = self.find_source_path() if relative_test_path: - self._config = TestConfigFromCpp( unit_tests_dir + os.sep + relative_test_path, context ) + self._config = TestConfigFromCpp(unit_tests_dir + os.sep + relative_test_path, context) else: self._config = TestConfig(context) @property - def command( self ): + def command(self): cmd = [self.exe] if 'custom-args' not in self.config.flags: # Assume we're a Catch2 exe, so: @@ -435,13 +440,13 @@ def command( self ): cmd += ['--context', self.config.context] return cmd - def run_test( self, configuration = None, log_path = None, opts = set() ): + def run_test(self, configuration=None, log_path=None, opts=set()): if not self.exe: raise RuntimeError("Tried to run test " + self.name + " with no exe file provided") try: cmd = self.command if opts: cmd += [opt for opt in opts] - run( cmd, stdout=log_path, append=self.ran, timeout=self.config.timeout ) + run(cmd, stdout=log_path, append=self.ran, timeout=self.config.timeout) finally: self._ran = True diff --git a/unit-tests/py/rspy/repo.py b/unit-tests/py/rspy/repo.py index ab2888feb3..f9fe8336dd 100644 --- a/unit-tests/py/rspy/repo.py +++ b/unit-tests/py/rspy/repo.py @@ -1,25 +1,26 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. 
-import os, platform +import os +import platform from rspy import log # this script is located in librealsense/unit-tests/py/rspy, so main repository is: -root = os.path.dirname( os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath( __file__ ))))) +root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) # Usually we expect the build directory to be directly under the root, named 'build' # ... but first check the expected LibCI build directories: # if platform.system() == 'Linux': - build = os.path.join( root, 'x86_64', 'static' ) + build = os.path.join(root, 'x86_64', 'static') else: - build = os.path.join( root, 'win10', 'win64', 'static' ) -if not os.path.isdir( build ): + build = os.path.join(root, 'win10', 'win64', 'static') +if not os.path.isdir(build): # - build = os.path.join( root, 'build' ) - if not os.path.isdir( build ): - log.w( 'repo.build directory wasn\'t found' ) - log.d( 'repo.root=', root ) + build = os.path.join(root, 'build') + if not os.path.isdir(build): + log.w('repo.build directory wasn\'t found') + log.d('repo.root=', root) build = None @@ -30,11 +31,11 @@ def find_pyrs(): global build from rspy import file if platform.system() == 'Linux': - for so in file.find( build, '(^|/)pyrealsense2.*\.so$' ): - return os.path.join( build, so ) + for so in file.find(build, '(^|/)pyrealsense2.*\.so$'): + return os.path.join(build, so) else: - for pyd in file.find( build, '(^|/)pyrealsense2.*\.pyd$' ): - return os.path.join( build, pyd ) + for pyd in file.find(build, '(^|/)pyrealsense2.*\.pyd$'): + return os.path.join(build, pyd) def find_pyrs_dir(): @@ -43,18 +44,18 @@ def find_pyrs_dir(): """ pyrs = find_pyrs() if pyrs: - pyrs_dir = os.path.dirname( pyrs ) + pyrs_dir = os.path.dirname(pyrs) return pyrs_dir -def pretty_fw_version( fw_version_as_string ): +def pretty_fw_version(fw_version_as_string): """ :return: a version with leading zeros removed, so as to be a little easier to read """ - 
return '.'.join( [str(int(c)) for c in fw_version_as_string.split( '.' )] ) + return '.'.join([str(int(c)) for c in fw_version_as_string.split('.')]) -def find_built_exe( source, name ): +def find_built_exe(source, name): """ Find an executable that was built in the repo @@ -65,18 +66,17 @@ def find_built_exe( source, name ): exe = None if platform.system() == 'Linux': global build - exe = os.path.join( build, source, name ) - if not os.path.isfile( exe ): + exe = os.path.join(build, source, name) + if not os.path.isfile(exe): return None else: - # In Windows, the name will be without extension and we need to find it somewhere + # In Windows, the name will be without extension, and we need to find it somewhere # in the path import sys for p in sys.path: - exe = os.path.join( p, name + '.exe' ) - if os.path.isfile( exe ): + exe = os.path.join(p, name + '.exe') + if os.path.isfile(exe): break else: return None return exe - diff --git a/unit-tests/run-unit-tests.py b/unit-tests/run-unit-tests.py index e34b3ec8e2..8ed4a61685 100644 --- a/unit-tests/run-unit-tests.py +++ b/unit-tests/run-unit-tests.py @@ -3,11 +3,16 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. -import sys, os, subprocess, re, platform, getopt, time +import sys +import os +import subprocess +import re +import platform +import getopt # Add our py/ module directory so we can find our own libraries -current_dir = os.path.dirname( os.path.abspath( __file__ ) ) -sys.path.append( os.path.join( current_dir, 'py' )) +current_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.join(current_dir, 'py')) from rspy import log, file, repo, libci @@ -15,50 +20,54 @@ # to avoid those to take only the pyrealsense2 we actually compiled! # # Rather than rebuilding the whole sys.path, we instead remove: -from site import getusersitepackages # not the other stuff, like quit(), exit(), etc.! 
-#log.d( 'site packages=', getusersitepackages() ) -#log.d( 'sys.path=', sys.path ) -#log.d( 'removing', [p for p in sys.path if file.is_inside( p, getusersitepackages() )]) -sys.path = [p for p in sys.path if not file.is_inside( p, getusersitepackages() )] -#log.d( 'modified=', sys.path ) +from site import getusersitepackages # not the other stuff, like quit(), exit(), etc.! + +# log.d( 'site packages=', getusersitepackages() ) +# log.d( 'sys.path=', sys.path ) +# log.d( 'removing', [p for p in sys.path if file.is_inside( p, getusersitepackages() )]) +sys.path = [p for p in sys.path if not file.is_inside(p, getusersitepackages())] + + +# log.d( 'modified=', sys.path ) def usage(): - ourname = os.path.basename( sys.argv[0] ) - print( 'Syntax: ' + ourname + ' [options] [dir]' ) - print( ' dir: location of executable tests to run' ) - print( 'Options:' ) - print( ' --debug Turn on debugging information (does not include LibRS debug logs; see --rslog)' ) - print( ' -v, --verbose Errors will dump the log to stdout' ) - print( ' -q, --quiet Suppress output; rely on exit status (0=no failures)' ) - print( ' -s, --stdout Do not redirect stdout to logs' ) - print( ' -r, --regex Run all tests whose name matches the following regular expression' ) - print( ' -t, --tag Run all tests with the following tag. If used multiple times runs all tests matching' ) - print( ' all tags. e.g. -t tag1 -t tag2 will run tests who have both tag1 and tag2' ) - print( ' tests automatically get tagged with \'exe\' or \'py\' and based on their location' ) - print( ' inside unit-tests/, e.g. unit-tests/func/test-hdr.py gets [func, py]' ) - print( ' --list-tags Print out all available tags. This option will not run any tests' ) - print( ' --list-tests Print out all available tests. 
This option will not run any tests' ) - print( ' If both list-tags and list-tests are specified each test will be printed along' ) - print( ' with its tags' ) - print( ' --no-exceptions Do not load the LibCI/exceptions.specs file' ) - print( ' --context <> The context to use for test configuration' ) - print( ' --repeat <#> Repeat each test <#> times' ) - print( ' --config <> Ignore test configurations; use the one provided' ) - print( ' --no-reset Do not try to reset any devices, with or without Acroname' ) - print( ' --rslog Enable LibRS logging (LOG_DEBUG etc.) to console in each test' ) + ourname = os.path.basename(sys.argv[0]) + print('Syntax: ' + ourname + ' [options] [dir]') + print(' dir: location of executable tests to run') + print('Options:') + print(' --debug Turn on debugging information (does not include LibRS debug logs; see --rslog)') + print(' -v, --verbose Errors will dump the log to stdout') + print(' -q, --quiet Suppress output; rely on exit status (0=no failures)') + print(' -s, --stdout Do not redirect stdout to logs') + print(' -r, --regex Run all tests whose name matches the following regular expression') + print( + ' -t, --tag Run all tests with the following tag. If used multiple times runs all tests matching') + print(' all tags. e.g. -t tag1 -t tag2 will run tests who have both tag1 and tag2') + print(' tests automatically get tagged with \'exe\' or \'py\' and based on their location') + print(' inside unit-tests/, e.g. unit-tests/func/test-hdr.py gets [func, py]') + print(' --list-tags Print out all available tags. This option will not run any tests') + print(' --list-tests Print out all available tests. 
This option will not run any tests') + print(' If both list-tags and list-tests are specified each test will be printed along') + print(' with its tags') + print(' --no-exceptions Do not load the LibCI/exceptions.specs file') + print(' --context <> The context to use for test configuration') + print(' --repeat <#> Repeat each test <#> times') + print(' --config <> Ignore test configurations; use the one provided') + print(' --no-reset Do not try to reset any devices, with or without Acroname') + print(' --rslog Enable LibRS logging (LOG_DEBUG etc.) to console in each test') print() - print( 'Examples:' ) - print( 'Running: python run-unit-tests.py -s' ) - print( ' Runs all tests, but direct their output to the console rather than log files' ) - print( 'Running: python run-unit-tests.py --list-tests --list-tags' ) - print( " Will find all tests and print for each one what tags it has in the following format:" ) - print( ' has tags: ' ) - print( 'Running: python run-unit-tests.py -r name -t log ~/my-build-directory' ) - print( " Will run all tests whose name contains 'name' and who have the tag 'log' while searching for the" ) - print( " exe files in the provided directory. Each test will create its own .log file to which its" ) - print( " output will be written." ) - sys.exit( 2 ) + print('Examples:') + print('Running: python run-unit-tests.py -s') + print(' Runs all tests, but direct their output to the console rather than log files') + print('Running: python run-unit-tests.py --list-tests --list-tags') + print(" Will find all tests and print for each one what tags it has in the following format:") + print(' has tags: ') + print('Running: python run-unit-tests.py -r name -t log ~/my-build-directory') + print(" Will run all tests whose name contains 'name' and who have the tag 'log' while searching for the") + print(" exe files in the provided directory. 
Each test will create its own .log file to which its") + print(" output will be written.") + sys.exit(2) # get os and directories for future use @@ -71,12 +80,12 @@ def usage(): # Parse command-line: try: - opts, args = getopt.getopt( sys.argv[1:], 'hvqr:st:', - longopts=['help', 'verbose', 'debug', 'quiet', 'regex=', 'stdout', 'tag=', 'list-tags', - 'list-tests', 'no-exceptions', 'context=', 'repeat=', 'config=', 'no-reset', - 'rslog'] ) + opts, args = getopt.getopt(sys.argv[1:], 'hvqr:st:', + longopts=['help', 'verbose', 'debug', 'quiet', 'regex=', 'stdout', 'tag=', 'list-tags', + 'list-tests', 'no-exceptions', 'context=', 'repeat=', 'config=', 'no-reset', + 'rslog']) except getopt.GetoptError as err: - log.e( err ) # something like "option -a not recognized" + log.e(err) # something like "option -a not recognized" usage() regex = None to_stdout = False @@ -101,7 +110,7 @@ def usage(): elif opt in ('-s', '--stdout'): to_stdout = True elif opt in ('-t', '--tag'): - required_tags.append( arg ) + required_tags.append(arg) elif opt == '--list-tags': list_tags = True elif opt == '--list-tests': @@ -111,8 +120,8 @@ def usage(): elif opt == '--context': context = arg elif opt == '--repeat': - if not arg.isnumeric() or int(arg) < 1: - log.e( "--repeat must be a number greater than 0" ) + if not arg.isnumeric() or int(arg) < 1: + log.e("--repeat must be a number greater than 0") usage() repeat = int(arg) elif opt == '--config': @@ -122,7 +131,8 @@ def usage(): elif opt == '--rslog': rslog = True -def find_build_dir( dir ): + +def find_build_dir(dir): """ Given a directory we know must be within the build tree, go up the tree until we find a file we know must be in the root build directory... 
@@ -131,45 +141,46 @@ def find_build_dir( dir ): """ build_dir = dir while True: - if os.path.isfile( os.path.join( build_dir, 'CMakeCache.txt' )): - log.d( 'assuming build dir path:', build_dir ) + if os.path.isfile(os.path.join(build_dir, 'CMakeCache.txt')): + log.d('assuming build dir path:', build_dir) return build_dir - base = os.path.dirname( build_dir ) + base = os.path.dirname(build_dir) if base == build_dir: - log.d( 'could not find CMakeCache.txt; cannot assume build dir from', dir ) + log.d('could not find CMakeCache.txt; cannot assume build dir from', dir) break build_dir = base -if len( args ) > 1: + +if len(args) > 1: usage() -exe_dir = None # the directory in which we expect to find exes -if len( args ) == 1: +exe_dir = None # the directory in which we expect to find exes +if len(args) == 1: exe_dir = args[0] - if not os.path.isdir( exe_dir ): - log.f( 'Not a directory:', exe_dir ) - build_dir = find_build_dir( exe_dir ) + if not os.path.isdir(exe_dir): + log.f('Not a directory:', exe_dir) + build_dir = find_build_dir(exe_dir) else: - build_dir = repo.build # may not actually contain exes - #log.d( 'repo.build:', build_dir ) + build_dir = repo.build # may not actually contain exes + # log.d( 'repo.build:', build_dir ) # Python scripts should be able to find the pyrealsense2 .pyd or else they won't work. We don't know # if the user (Travis included) has pyrealsense2 installed but even if so, we want to use the one we compiled. 
# we search the librealsense repository for the .pyd file (.so file in linux) pyrs = "" if linux: - for so in file.find( exe_dir or build_dir or repo.root, '(^|/)pyrealsense2.*\.so$' ): + for so in file.find(exe_dir or build_dir or repo.root, '(^|/)pyrealsense2.*\.so$'): pyrs = so else: - for pyd in file.find( exe_dir or build_dir or repo.root, '(^|/)pyrealsense2.*\.pyd$' ): + for pyd in file.find(exe_dir or build_dir or repo.root, '(^|/)pyrealsense2.*\.pyd$'): pyrs = pyd if pyrs: # The path is relative; make it absolute and add to PYTHONPATH so it can be found by tests - pyrs_path = os.path.join( exe_dir or build_dir or repo.root, pyrs ) + pyrs_path = os.path.join(exe_dir or build_dir or repo.root, pyrs) # We need to add the directory not the file itself - pyrs_path = os.path.dirname( pyrs_path ) - log.d( 'found pyrealsense pyd in:', pyrs_path ) + pyrs_path = os.path.dirname(pyrs_path) + log.d('found pyrealsense pyd in:', pyrs_path) if not exe_dir: - build_dir = find_build_dir( pyrs_path ) + build_dir = find_build_dir(pyrs_path) if linux: exe_dir = build_dir else: @@ -182,23 +193,23 @@ def find_build_dir( dir ): mask += r'$' else: mask += r'\.exe' - for executable in file.find( build_dir, mask ): - executable = os.path.join( build_dir, executable ) - #log.d( 'found exe=', executable ) - if not file.is_executable( executable ): + for executable in file.find(build_dir, mask): + executable = os.path.join(build_dir, executable) + # log.d( 'found exe=', executable ) + if not file.is_executable(executable): continue - dir_with_test = os.path.dirname( executable ) + dir_with_test = os.path.dirname(executable) if exe_dir and exe_dir != dir_with_test: - log.f( "Ambiguous executable tests in 2 directories:\n\t", exe_dir, "\n\t", dir_with_test, - "\n\tSpecify the directory manually..." 
) + log.f("Ambiguous executable tests in 2 directories:\n\t", exe_dir, "\n\t", dir_with_test, + "\n\tSpecify the directory manually...") exe_dir = dir_with_test if not to_stdout: if exe_dir: logdir = exe_dir + os.sep + 'unit-tests' else: # no test executables were found. We put the logs directly in build directory - logdir = os.path.join( repo.root, 'build', 'unit-tests' ) - os.makedirs( logdir, exist_ok=True ) + logdir = os.path.join(repo.root, 'build', 'unit-tests') + os.makedirs(logdir, exist_ok=True) libci.logdir = logdir n_tests = 0 @@ -212,19 +223,19 @@ def find_build_dir( dir ): os.environ["PYTHONPATH"] += os.pathsep + pyrs_path -def configuration_str( configuration, repetition = 1, prefix = '', suffix = '' ): +def configuration_str(configuration, repetition=1, prefix='', suffix=''): """ Return a string repr (with a prefix and/or suffix) of the configuration or '' if it's None """ s = '' if configuration is not None: - s += '[' + ' '.join( configuration ) + ']' + s += '[' + ' '.join(configuration) + ']' if repetition: - s += '[' + str(repetition+1) + ']' + s += '[' + str(repetition + 1) + ']' if s: s = prefix + s + suffix return s -def check_log_for_fails( path_to_log, testname, configuration = None, repetition = 1 ): +def check_log_for_fails(path_to_log, testname, configuration=None, repetition=1): # Normal logs are expected to have in last line: # "All tests passed (11 assertions in 1 test case)" # Tests that have failures, however, will show: @@ -235,8 +246,8 @@ def check_log_for_fails( path_to_log, testname, configuration = None, repetition if path_to_log is None: return False results = None - for ctx in file.grep( r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)|^----------TEST-SEPARATOR----------$', - path_to_log ): + for ctx in file.grep(r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)|^----------TEST-SEPARATOR----------$', + path_to_log): m = ctx['match'] if m.string == "----------TEST-SEPARATOR----------": results = None @@ -246,26 +257,27 @@ def 
check_log_for_fails( path_to_log, testname, configuration = None, repetition if not results: return False - total = int( results.group( 1 ) ) - passed = int( results.group( 2 ) ) - if results.group( 3 ) == 'failed': + total = int(results.group(1)) + passed = int(results.group(2)) + if results.group(3) == 'failed': # "test cases: 1 | 1 failed" passed = total - passed if passed < total: if total == 1 or passed == 0: desc = 'failed' else: - desc = str( total - passed ) + ' of ' + str( total ) + ' failed' + desc = str(total - passed) + ' of ' + str(total) + ' failed' if log.is_verbose_on(): - log.e( log.red + testname + log.reset + ': ' + configuration_str( configuration, repetition, suffix=' ' ) + desc ) - log.i( 'Log: >>>' ) + log.e( + log.red + testname + log.reset + ': ' + configuration_str(configuration, repetition, suffix=' ') + desc) + log.i('Log: >>>') log.out() - file.cat( path_to_log ) - log.out( '<<<' ) + file.cat(path_to_log) + log.out('<<<') else: - log.e( log.red + testname + log.reset + ': ' + configuration_str( configuration, repetition, - suffix=' ' ) + desc + '; see ' + path_to_log ) + log.e(log.red + testname + log.reset + ': ' + configuration_str(configuration, repetition, + suffix=' ') + desc + '; see ' + path_to_log) return True return False @@ -273,42 +285,42 @@ def check_log_for_fails( path_to_log, testname, configuration = None, repetition def get_tests(): global regex, exe_dir, pyrs, current_dir, linux, context, list_only if regex: - pattern = re.compile( regex ) + pattern = re.compile(regex) if list_only: # We want to list all tests, even if they weren't built. 
# So we look for the source files instead of using the manifest - for cpp_test in file.find( current_dir, '(^|/)test-.*\.cpp' ): - testparent = os.path.dirname( cpp_test ) # "log/internal" <- "log/internal/test-all.py" + for cpp_test in file.find(current_dir, '(^|/)test-.*\.cpp'): + testparent = os.path.dirname(cpp_test) # "log/internal" <- "log/internal/test-all.py" if testparent: - testname = 'test-' + testparent.replace( '/', '-' ) + '-' + os.path.basename( cpp_test )[ - 5:-4] # remove .cpp + testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(cpp_test)[ + 5:-4] # remove .cpp else: - testname = os.path.basename( cpp_test )[:-4] + testname = os.path.basename(cpp_test)[:-4] - if regex and not pattern.search( testname ): + if regex and not pattern.search(testname): continue - yield libci.ExeTest( testname ) + yield libci.ExeTest(testname) elif exe_dir: # In Linux, the build targets are located elsewhere than on Windows # Go over all the tests from a "manifest" we take from the result of the last CMake # run (rather than, for example, looking for test-* in the build-directory): - manifestfile = os.path.join( build_dir, 'CMakeFiles', 'TargetDirectories.txt' ) + manifestfile = os.path.join(build_dir, 'CMakeFiles', 'TargetDirectories.txt') if linux: manifestfile = exe_dir + '/CMakeFiles/TargetDirectories.txt' # log.d( manifestfile ) - for manifest_ctx in file.grep( r'(?<=unit-tests/build/)\S+(?=/CMakeFiles/test-\S+.dir$)', manifestfile ): + for manifest_ctx in file.grep(r'(?<=unit-tests/build/)\S+(?=/CMakeFiles/test-\S+.dir$)', manifestfile): # We need to first create the test name so we can see if it fits the regex - testdir = manifest_ctx['match'].group( 0 ) # "log/internal/test-all" + testdir = manifest_ctx['match'].group(0) # "log/internal/test-all" # log.d( testdir ) - testparent = os.path.dirname( testdir ) # "log/internal" + testparent = os.path.dirname(testdir) # "log/internal" if testparent: - testname = 'test-' + testparent.replace( 
'/', '-' ) + '-' + os.path.basename( testdir )[ - 5:] # "test-log-internal-all" + testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(testdir)[ + 5:] # "test-log-internal-all" else: testname = testdir # no parent folder so we get "test-all" - if regex and not pattern.search( testname ): + if regex and not pattern.search(testname): continue if linux: @@ -316,28 +328,28 @@ def get_tests(): else: exe = exe_dir + '/' + testname + '.exe' - yield libci.ExeTest( testname, exe, context ) + yield libci.ExeTest(testname, exe, context) # Python unit-test scripts are in the same directory as us... we want to consider running them # (we may not if they're live and we have no pyrealsense2.pyd): - for py_test in file.find( current_dir, '(^|/)test-.*\.py' ): - testparent = os.path.dirname( py_test ) # "log/internal" <- "log/internal/test-all.py" + for py_test in file.find(current_dir, '(^|/)test-.*\.py'): + testparent = os.path.dirname(py_test) # "log/internal" <- "log/internal/test-all.py" if testparent: - testname = 'test-' + testparent.replace( '/', '-' ) + '-' + os.path.basename( py_test )[5:-3] # remove .py + testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(py_test)[5:-3] # remove .py else: - testname = os.path.basename( py_test )[:-3] + testname = os.path.basename(py_test)[:-3] - if regex and not pattern.search( testname ): + if regex and not pattern.search(testname): continue - yield libci.PyTest( testname, py_test, context ) + yield libci.PyTest(testname, py_test, context) -def prioritize_tests( tests ): - return sorted( tests, key=lambda t: t.config.priority ) +def prioritize_tests(tests): + return sorted(tests, key=lambda t: t.config.priority) -def devices_by_test_config( test, exceptions ): +def devices_by_test_config(test, exceptions): """ Yield pairs for each valid configuration under which the test should run. 
@@ -348,42 +360,43 @@ def devices_by_test_config( test, exceptions ): :param test: The test (of class type Test) we're interested in """ global forced_configurations - for configuration in ( forced_configurations or test.config.configurations ): + for configuration in (forced_configurations or test.config.configurations): try: - for serial_numbers in devices.by_configuration( configuration, exceptions ): + for serial_numbers in devices.by_configuration(configuration, exceptions): yield configuration, serial_numbers except RuntimeError as e: if devices.acroname: - log.e( log.red + test.name + log.reset + ': ' + str( e ) ) + log.e(log.red + test.name + log.reset + ': ' + str(e)) else: - log.w( log.yellow + test.name + log.reset + ': ' + str( e ) ) + log.w(log.yellow + test.name + log.reset + ': ' + str(e)) continue -def test_wrapper( test, configuration = None, repetition = 1 ): +def test_wrapper(test, configuration=None, repetition=1): global n_tests, rslog n_tests += 1 # if not log.is_debug_on() or log.is_color_on(): - log.progress( configuration_str( configuration, repetition, suffix=' ' ) + test.name, '...' 
) + log.progress(configuration_str(configuration, repetition, suffix=' ') + test.name, '...') # log_path = test.get_log() # opts = set() if rslog: - opts.add( '--rslog' ) + opts.add('--rslog') try: - test.run_test( configuration = configuration, log_path = log_path, opts = opts ) + test.run_test(configuration=configuration, log_path=log_path, opts=opts) except FileNotFoundError as e: - log.e( log.red + test.name + log.reset + ':', str( e ) + configuration_str( configuration, repetition, prefix=' ' ) ) + log.e(log.red + test.name + log.reset + ':', str(e) + configuration_str(configuration, repetition, prefix=' ')) except subprocess.TimeoutExpired: - log.e( log.red + test.name + log.reset + ':', configuration_str( configuration, repetition, suffix=' ' ) + 'timed out' ) + log.e(log.red + test.name + log.reset + ':', + configuration_str(configuration, repetition, suffix=' ') + 'timed out') except subprocess.CalledProcessError as cpe: - if not check_log_for_fails( log_path, test.name, configuration, repetition ): + if not check_log_for_fails(log_path, test.name, configuration, repetition): # An unexpected error occurred - log.e( log.red + test.name + log.reset + ':', - configuration_str( configuration, repetition, suffix=' ' ) + 'exited with non-zero value (' + str( - cpe.returncode ) + ')' ) + log.e(log.red + test.name + log.reset + ':', + configuration_str(configuration, repetition, suffix=' ') + 'exited with non-zero value (' + str( + cpe.returncode) + ')') # Run all tests @@ -391,26 +404,26 @@ def test_wrapper( test, configuration = None, repetition = 1 ): list_only = list_tags or list_tests if not list_only: if pyrs: - sys.path.insert( 1, pyrs_path ) # Make sure we pick up the right pyrealsense2! + sys.path.insert(1, pyrs_path) # Make sure we pick up the right pyrealsense2! 
from rspy import devices devices.query() devices.map_unknown_ports() # # Under Travis, we'll have no devices and no acroname - skip_live_tests = len( devices.all() ) == 0 and not devices.acroname + skip_live_tests = len(devices.all()) == 0 and not devices.acroname # if not skip_live_tests: if not to_stdout: - log.i( 'Logs in:', libci.logdir ) + log.i('Logs in:', libci.logdir) exceptions = None - if not no_exceptions and os.path.isfile( libci.exceptionsfile ): + if not no_exceptions and os.path.isfile(libci.exceptionsfile): try: - log.d( 'loading device exceptions from:', libci.exceptionsfile ) + log.d('loading device exceptions from:', libci.exceptionsfile) log.debug_indent() - exceptions = devices.load_specs_from_file( libci.exceptionsfile ) - exceptions = devices.expand_specs( exceptions ) - log.d( '==>', exceptions ) + exceptions = devices.load_specs_from_file(libci.exceptionsfile) + exceptions = devices.expand_specs(exceptions) + log.d('==>', exceptions) finally: log.debug_unindent() # @@ -418,10 +431,10 @@ def test_wrapper( test, configuration = None, repetition = 1 ): available_tags = set() tests = [] if context: - log.d( 'running under context:', context ) - for test in prioritize_tests( get_tests() ): + log.d('running under context:', context) + for test in prioritize_tests(get_tests()): # - log.d( 'found', test.name, '...' 
) + log.d('found', test.name, '...') try: log.debug_indent() test.debug_dump() @@ -429,43 +442,43 @@ def test_wrapper( test, configuration = None, repetition = 1 ): if test.config.donotrun: continue # - if required_tags and not all( tag in test.config.tags for tag in required_tags ): - log.d( 'does not fit --tag:', test.config.tags ) + if required_tags and not all(tag in test.config.tags for tag in required_tags): + log.d('does not fit --tag:', test.config.tags) continue # if 'Windows' in test.config.flags and linux: - log.d( 'test has Windows flag and OS is Linux' ) + log.d('test has Windows flag and OS is Linux') continue if 'Linux' in test.config.flags and not linux: - log.d( 'test has Linux flag and OS is Windows' ) + log.d('test has Linux flag and OS is Windows') continue # - available_tags.update( test.config.tags ) - tests.append( test ) + available_tags.update(test.config.tags) + tests.append(test) if list_only: n_tests += 1 continue # if not test.is_live(): for repetition in range(repeat): - test_wrapper( test, repetition = repetition ) + test_wrapper(test, repetition=repetition) continue # if skip_live_tests: - log.w( test.name + ':', 'is live and there are no cameras; skipping' ) + log.w(test.name + ':', 'is live and there are no cameras; skipping') continue # - for configuration, serial_numbers in devices_by_test_config( test, exceptions ): + for configuration, serial_numbers in devices_by_test_config(test, exceptions): for repetition in range(repeat): try: - log.d( 'configuration:', configuration ) + log.d('configuration:', configuration) log.debug_indent() if not no_reset: - devices.enable_only( serial_numbers, recycle=True ) + devices.enable_only(serial_numbers, recycle=True) except RuntimeError as e: - log.w( log.red + test.name + log.reset + ': ' + str( e ) ) + log.w(log.red + test.name + log.reset + ': ' + str(e)) else: - test_wrapper( test, configuration, repetition ) + test_wrapper(test, configuration, repetition) finally: log.debug_unindent() # 
@@ -475,29 +488,29 @@ def test_wrapper( test, configuration = None, repetition = 1 ): log.progress() # if not n_tests: - log.f( 'No unit-tests found!' ) + log.f('No unit-tests found!') # if list_only: if list_tags and list_tests: - for t in sorted( tests, key= lambda x: x.name ): - print( t.name, "has tags:", ' '.join( t.config.tags ) ) + for t in sorted(tests, key=lambda x: x.name): + print(t.name, "has tags:", ' '.join(t.config.tags)) # elif list_tags: - for t in sorted( list( available_tags ) ): - print( t ) + for t in sorted(list(available_tags)): + print(t) # elif list_tests: - for t in sorted( tests, key= lambda x: x.name ): - print( t.name ) + for t in sorted(tests, key=lambda x: x.name): + print(t.name) # else: n_errors = log.n_errors() if n_errors: - log.out( log.red + str( n_errors ) + log.reset, 'of', n_tests, 'test(s)', - log.red + 'failed!' + log.reset + log.clear_eos ) - sys.exit( 1 ) + log.out(log.red + str(n_errors) + log.reset, 'of', n_tests, 'test(s)', + log.red + 'failed!' 
+ log.reset + log.clear_eos) + sys.exit(1) # - log.out( str( n_tests ) + ' unit-test(s) completed successfully' + log.clear_eos ) + log.out(str(n_tests) + ' unit-test(s) completed successfully' + log.clear_eos) # finally: # @@ -506,4 +519,4 @@ def test_wrapper( test, configuration = None, repetition = 1 ): if devices.acroname: devices.acroname.disconnect() # -sys.exit( 0 ) +sys.exit(0) diff --git a/unit-tests/syncer/sw.py b/unit-tests/syncer/sw.py index 1098a9d5c7..ebac6b3356 100644 --- a/unit-tests/syncer/sw.py +++ b/unit-tests/syncer/sw.py @@ -5,10 +5,9 @@ from rspy import log, test import time - # Constants # -domain = rs.timestamp_domain.hardware_clock # For either depth/color +domain = rs.timestamp_domain.hardware_clock # For either depth/color # # To be set before init() or playback() # @@ -30,7 +29,7 @@ playback_status = None -def init( syncer_matcher = rs.matchers.default ): +def init(syncer_matcher=rs.matchers.default): """ One of the two initialization functions: @@ -50,16 +49,16 @@ def init( syncer_matcher = rs.matchers.default ): gap_c = 1000 / fps_c # global pixels, w, h - pixels = bytearray( b'\x00' * ( w * h * 2 )) # Dummy data + pixels = bytearray(b'\x00' * (w * h * 2)) # Dummy data # global device device = rs.software_device() if syncer_matcher is not None: - device.create_matcher( syncer_matcher ) + device.create_matcher(syncer_matcher) # global depth_sensor, color_sensor - depth_sensor = device.add_sensor( "Depth" ) - color_sensor = device.add_sensor( "Color" ) + depth_sensor = device.add_sensor("Depth") + color_sensor = device.add_sensor("Color") # depth_stream = rs.video_stream() depth_stream.type = rs.stream.depth @@ -71,7 +70,7 @@ def init( syncer_matcher = rs.matchers.default ): depth_stream.fps = fps_d # global depth_profile - depth_profile = rs.video_stream_profile( depth_sensor.add_video_stream( depth_stream )) + depth_profile = rs.video_stream_profile(depth_sensor.add_video_stream(depth_stream)) # color_stream = rs.video_stream() 
color_stream.type = rs.stream.color @@ -83,28 +82,28 @@ def init( syncer_matcher = rs.matchers.default ): color_stream.fps = fps_c # global color_profile - color_profile = rs.video_stream_profile( color_sensor.add_video_stream( color_stream )) + color_profile = rs.video_stream_profile(color_sensor.add_video_stream(color_stream)) # # We don't want to lose any frames so use a big queue size (default is 1) global syncer if syncer_matcher is not None: - syncer = rs.syncer( 100 ) + syncer = rs.syncer(100) else: - syncer = rs.frame_queue( 100 ) + syncer = rs.frame_queue(100) # global playback_status playback_status = None -def playback_callback( status ): +def playback_callback(status): """ """ global playback_status playback_status = status - log.d( "...", status ) + log.d("...", status) -def playback( filename, use_syncer = True ): +def playback(filename, use_syncer=True): """ One of the two initialization functions: @@ -122,24 +121,24 @@ def playback( filename, use_syncer = True ): ctx = rs.context() # global device - device = rs.playback( ctx.load_device( filename ) ) - device.set_real_time( False ) - device.set_status_changed_callback( playback_callback ) + device = rs.playback(ctx.load_device(filename)) + device.set_real_time(False) + device.set_status_changed_callback(playback_callback) # global depth_sensor, color_sensor sensors = device.query_sensors() - depth_sensor = next( s for s in sensors if s.name == "Depth" ) - color_sensor = next( s for s in sensors if s.name == "Color" ) + depth_sensor = next(s for s in sensors if s.name == "Depth") + color_sensor = next(s for s in sensors if s.name == "Color") # global depth_profile, color_profile - depth_profile = next( p for p in depth_sensor.profiles if p.stream_type() == rs.stream.depth ) - color_profile = next( p for p in color_sensor.profiles if p.stream_type() == rs.stream.color ) + depth_profile = next(p for p in depth_sensor.profiles if p.stream_type() == rs.stream.depth) + color_profile = next(p for p in 
color_sensor.profiles if p.stream_type() == rs.stream.color) # global syncer if use_syncer: - syncer = rs.syncer( 100 ) # We don't want to lose any frames so uses a big queue size (default is 1) + syncer = rs.syncer(100) # We don't want to lose any frames so uses a big queue size (default is 1) else: - syncer = rs.frame_queue( 100 ) + syncer = rs.frame_queue(100) # global playback_status playback_status = rs.playback_status.unknown @@ -149,10 +148,10 @@ def start(): """ """ global depth_profile, color_profile, depth_sensor, color_sensor, syncer - depth_sensor.open( depth_profile ) - color_sensor.open( color_profile ) - depth_sensor.start( syncer ) - color_sensor.start( syncer ) + depth_sensor.open(depth_profile) + color_sensor.open(color_profile) + depth_sensor.start(syncer) + color_sensor.start(syncer) def stop(): @@ -181,12 +180,12 @@ def reset(): syncer = None -def generate_depth_frame( frame_number, timestamp ): +def generate_depth_frame(frame_number, timestamp): """ """ global playback_status if playback_status is not None: - raise RuntimeError( "cannot generate frames when playing back" ) + raise RuntimeError("cannot generate frames when playing back") # global depth_profile, domain, pixels, depth_sensor, w, bpp depth_frame = rs.software_video_frame() @@ -198,15 +197,16 @@ def generate_depth_frame( frame_number, timestamp ): depth_frame.domain = domain depth_frame.profile = depth_profile # - log.d( "-->", depth_frame ) - depth_sensor.on_video_frame( depth_frame ) + log.d("-->", depth_frame) + depth_sensor.on_video_frame(depth_frame) + -def generate_color_frame( frame_number, timestamp ): +def generate_color_frame(frame_number, timestamp): """ """ global playback_status if playback_status is not None: - raise RuntimeError( "cannot generate frames when playing back" ) + raise RuntimeError("cannot generate frames when playing back") # global color_profile, domain, pixels, color_sensor, w, bpp color_frame = rs.software_video_frame() @@ -218,14 +218,16 @@ def 
generate_color_frame( frame_number, timestamp ): color_frame.domain = domain color_frame.profile = color_profile # - log.d( "-->", color_frame ) - color_sensor.on_video_frame( color_frame ) + log.d("-->", color_frame) + color_sensor.on_video_frame(color_frame) + + +def generate_depth_and_color(frame_number, timestamp): + generate_depth_frame(frame_number, timestamp) + generate_color_frame(frame_number, timestamp) -def generate_depth_and_color( frame_number, timestamp ): - generate_depth_frame( frame_number, timestamp ) - generate_color_frame( frame_number, timestamp ) -def expect( depth_frame = None, color_frame = None, nothing_else = False ): +def expect(depth_frame=None, color_frame=None, nothing_else=False): """ Looks at the syncer queue and gets the next frame from it if available, checking its contents against the expected frame numbers. @@ -234,49 +236,49 @@ def expect( depth_frame = None, color_frame = None, nothing_else = False ): f = syncer.poll_for_frame() if playback_status is not None: countdown = 50 # 5 seconds - while not f and playback_status != rs.playback_status.stopped: + while not f and playback_status != rs.playback_status.stopped: countdown -= 1 if countdown == 0: break - time.sleep( 0.1 ) + time.sleep(0.1) f = syncer.poll_for_frame() # NOTE: f will never be None if not f: - test.check( depth_frame is None, "expected a depth frame" ) - test.check( color_frame is None, "expected a color frame" ) + test.check(depth_frame is None, "expected a depth frame") + test.check(color_frame is None, "expected a color frame") return False - log.d( "Got", f ) + log.d("Got", f) - fs = rs.composite_frame( f ) + fs = rs.composite_frame(f) if fs: depth = fs.get_depth_frame() else: - depth = rs.depth_frame( f ) - test.info( "actual depth", depth ) - test.check_equal( depth_frame is None, not depth ) + depth = rs.depth_frame(f) + test.info("actual depth", depth) + test.check_equal(depth_frame is None, not depth) if depth_frame is not None and depth: - 
test.check_equal( depth.get_frame_number(), depth_frame ) - + test.check_equal(depth.get_frame_number(), depth_frame) + if fs: color = fs.get_color_frame() elif not depth: - color = rs.video_frame( f ) + color = rs.video_frame(f) else: color = None - test.info( "actual color", color ) - test.check_equal( color_frame is None, not color ) + test.info("actual color", color) + test.check_equal(color_frame is None, not color) if color_frame is not None and color: - test.check_equal( color.get_frame_number(), color_frame ) + test.check_equal(color.get_frame_number(), color_frame) if nothing_else: f = syncer.poll_for_frame() - test.info( "Expected nothing else; actual", f ) - test.check( not f ) + test.info("Expected nothing else; actual", f) + test.check(not f) return True -def expect_nothing(): - expect( nothing_else = True ) +def expect_nothing(): + expect(nothing_else=True) diff --git a/unit-tests/syncer/test-ts-desync.py b/unit-tests/syncer/test-ts-desync.py index 6d81c3f59d..87b99ea798 100644 --- a/unit-tests/syncer/test-ts-desync.py +++ b/unit-tests/syncer/test-ts-desync.py @@ -5,16 +5,15 @@ from rspy import log, test import sw - # The timestamp jumps are closely correlated to the FPS passed to the video streams: # syncer expects frames to arrive every 1000/FPS milliseconds! 
sw.fps_c = sw.fps_d = 30 -sw.init( syncer_matcher = rs.matchers.dic_c ) +sw.init(syncer_matcher=rs.matchers.dic_c) sw.start() ############################################################################################# # -test.start( "Init" ) +test.start("Init") # It can take a few frames for the syncer to actually produce a matched frameset (it doesn't # know what to match to in the beginning) @@ -24,9 +23,9 @@ # 0 @0 so next expected frame timestamp is at 0+16.67 # 0 @0 # -sw.generate_depth_and_color( frame_number = 0, timestamp = 0 ) -sw.expect( depth_frame = 0 ) # syncer doesn't know about C yet, so releases right away -sw.expect( color_frame = 0, nothing_else = True ) # no hope for a match: D@0 is already out, so it's released +sw.generate_depth_and_color(frame_number=0, timestamp=0) +sw.expect(depth_frame=0) # syncer doesn't know about C yet, so releases right away +sw.expect(color_frame=0, nothing_else=True) # no hope for a match: D@0 is already out, so it's released # # The syncer now knows about both streams, and is empty -- that was what we wanted @@ -34,54 +33,54 @@ # ############################################################################################# # -test.start( "Go past Color's Next Expected; get a lone Depth frame" ) +test.start("Go past Color's Next Expected; get a lone Depth frame") # 1 @7952 -> NE=7985; it's released because WAY past C.NE # -sw.generate_depth_frame( 1, 7952 ) -sw.expect( depth_frame = 1, nothing_else = True ) +sw.generate_depth_frame(1, 7952) +sw.expect(depth_frame=1, nothing_else=True) test.finish() # ############################################################################################# # -test.start( "Generate a Color frame which will wait for Depth" ) +test.start("Generate a Color frame which will wait for Depth") # 2 @7978 will wait, as it's ~= D.NE # -sw.generate_color_frame( 2, 7978 ) +sw.generate_color_frame(2, 7978) sw.expect_nothing() test.finish() # 
############################################################################################# # -test.start( "Generate Depth for release BEFORE the waiting Color" ) +test.start("Generate Depth for release BEFORE the waiting Color") # 3 @7952 -> needs to be released BEFORE C2!! # # NOTE: the timestamp is the SAME AS BEFORE! Imagine that, instead of a Depth frame, this was -# an Infrared: the matcher would be (TS: (TS: Depth Infra Confidence) Color). But we have no -# Infra or Confidence mechanism (in sw) so we just generate another D -- it should have the +# an Infrared: the matcher would be (TS: (TS: Depth Infra Confidence) Color). But we have no +# Infra or Confidence mechanism (in sw) so we just generate another D -- it should have the # same effect: # # NOTE: this used to crash (see LRS-289)! # -sw.generate_depth_frame( 3, 7952 ) -sw.expect( depth_frame = 3 ) +sw.generate_depth_frame(3, 7952) +sw.expect(depth_frame=3) test.finish() # ############################################################################################# # -test.start( "And only then get the Color when we generate a matching Depth" ) +test.start("And only then get the Color when we generate a matching Depth") sw.expect_nothing() # C is still waiting for D.NE! # 4 @7986 # -sw.generate_depth_frame( 4, 7986 ) -sw.expect( depth_frame = 4, color_frame = 2, nothing_else = True ) +sw.generate_depth_frame(4, 7986) +sw.expect(depth_frame=4, color_frame=2, nothing_else=True) test.finish() # diff --git a/unit-tests/syncer/test-ts-diff-fps.py b/unit-tests/syncer/test-ts-diff-fps.py index b518e2d46d..b6b1e344d0 100644 --- a/unit-tests/syncer/test-ts-diff-fps.py +++ b/unit-tests/syncer/test-ts-diff-fps.py @@ -1,22 +1,19 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. 
-import pyrealsense2 as rs -from rspy import log, test import sw - +from rspy import log, test # The timestamp jumps are closely correlated to the FPS passed to the video streams: # syncer expects frames to arrive every 1000/FPS milliseconds! sw.fps_d = 100 -sw.fps_c = 10 +sw.fps_c = 10 sw.init() sw.start() - ############################################################################################# # -test.start( "Wait for framesets" ) +test.start("Wait for framesets") # It can take a few frames for the syncer to actually produce a matched frameset (it doesn't # know what to match to in the beginning) @@ -27,25 +24,25 @@ # 1 @10 # 0 @0 # -sw.generate_depth_frame( frame_number = 0, timestamp = sw.gap_d * 0 ) -sw.generate_depth_frame( 1, sw.gap_d * 1 ) # @10 -sw.generate_color_frame( 0, sw.gap_c * 0 ) # @0 -- small latency -sw.expect( depth_frame = 0 ) # syncer doesn't know about color yet -sw.expect( depth_frame = 1 ) -#expect( color_frame = 0, nothing_else = True ) +sw.generate_depth_frame(frame_number=0, timestamp=sw.gap_d * 0) +sw.generate_depth_frame(1, sw.gap_d * 1) # @10 +sw.generate_color_frame(0, sw.gap_c * 0) # @0 -- small latency +sw.expect(depth_frame=0) # syncer doesn't know about color yet +sw.expect(depth_frame=1) +# expect( color_frame = 0, nothing_else = True ) # We'd expect C0 to not wait for another frame, but it does: @0 is comparable to @20 (D.NE) # because it's using C.fps (10 fps -> 100 gap / 2 = 50ms error, so 0~=20)! 
sw.expect_nothing() # 2 @20 -sw.generate_depth_frame( 2, sw.gap_d * 2 ) -sw.expect( depth_frame = 2, color_frame = 0, nothing_else = True ) +sw.generate_depth_frame(2, sw.gap_d * 2) +sw.expect(depth_frame=2, color_frame=0, nothing_else=True) test.finish() # ############################################################################################# # -test.start( "Depth waits for next Color" ) +test.start("Depth waits for next Color") # 3 @ 30 # 4 @ 40 @@ -57,35 +54,42 @@ # 10 @100 -> wait # 11 @110 # -sw.generate_depth_frame( 3, sw.gap_d * 3 ); sw.expect( depth_frame = 3 ) -sw.generate_depth_frame( 4, sw.gap_d * 4 ); sw.expect( depth_frame = 4 ) -sw.generate_depth_frame( 5, sw.gap_d * 5 ); sw.expect( depth_frame = 5 ) -sw.generate_depth_frame( 6, sw.gap_d * 6 ); sw.expect( depth_frame = 6 ) -sw.generate_depth_frame( 7, sw.gap_d * 7 ); sw.expect( depth_frame = 7 ) -sw.generate_depth_frame( 8, sw.gap_d * 8 ); sw.expect( depth_frame = 8 ) -sw.generate_depth_frame( 9, sw.gap_d * 9 ); sw.expect( depth_frame = 9 ) -sw.generate_depth_frame( 10, sw.gap_d * 10 ); #sw.expect( depth_frame = 10 ) +sw.generate_depth_frame(3, sw.gap_d * 3) +sw.expect(depth_frame=3) +sw.generate_depth_frame(4, sw.gap_d * 4) +sw.expect(depth_frame=4) +sw.generate_depth_frame(5, sw.gap_d * 5) +sw.expect(depth_frame=5) +sw.generate_depth_frame(6, sw.gap_d * 6) +sw.expect(depth_frame=6) +sw.generate_depth_frame(7, sw.gap_d * 7) +sw.expect(depth_frame=7) +sw.generate_depth_frame(8, sw.gap_d * 8) +sw.expect(depth_frame=8) +sw.generate_depth_frame(9, sw.gap_d * 9) +sw.expect(depth_frame=9) +sw.generate_depth_frame(10, sw.gap_d * 10) # sw.expect( depth_frame = 10 ) # C.NE is @100, so it should wait... 
sw.expect_nothing() -sw.generate_depth_frame( 11, sw.gap_d * 11 ); +sw.generate_depth_frame(11, sw.gap_d * 11) sw.expect_nothing() # 1 @100 -> release (D10,C1) and (D11) -sw.generate_color_frame( 1, sw.gap_c * 1 ); # @100 -- small latency -sw.expect( depth_frame = 10, color_frame = 1 ) -sw.expect( depth_frame = 11 ) +sw.generate_color_frame(1, sw.gap_c * 1) # @100 -- small latency +sw.expect(depth_frame=10, color_frame=1) +sw.expect(depth_frame=11) sw.expect_nothing() # 12 @120 doesn't wait -sw.generate_depth_frame( 12, sw.gap_d * 12 ); -sw.expect( depth_frame = 12 ) +sw.generate_depth_frame(12, sw.gap_d * 12) +sw.expect(depth_frame=12) sw.expect_nothing() test.finish() # ############################################################################################# # -test.start( "Color is early" ) +test.start("Color is early") # 13 @130 # 14 @140 @@ -95,46 +99,52 @@ # 2 @200 -> wait # 18 @180 -> (D18,C2) ?! why not wait for (D20,C2) ?! # -sw.generate_depth_frame( 13, sw.gap_d * 13 ); sw.expect( depth_frame = 13 ) -sw.generate_depth_frame( 14, sw.gap_d * 14 ); sw.expect( depth_frame = 14 ) -sw.generate_depth_frame( 15, sw.gap_d * 15 ); sw.expect( depth_frame = 15 ) -sw.generate_depth_frame( 16, sw.gap_d * 16 ); sw.expect( depth_frame = 16 ) -sw.generate_depth_frame( 17, sw.gap_d * 17 ); sw.expect( depth_frame = 17 ) - -sw.generate_color_frame( 2, sw.gap_c * 2 ); sw.expect_nothing() -# We're waiting for D.NE @180, because it's comparable (using min fps of the two) +sw.generate_depth_frame(13, sw.gap_d * 13) +sw.expect(depth_frame=13) +sw.generate_depth_frame(14, sw.gap_d * 14) +sw.expect(depth_frame=14) +sw.generate_depth_frame(15, sw.gap_d * 15) +sw.expect(depth_frame=15) +sw.generate_depth_frame(16, sw.gap_d * 16) +sw.expect(depth_frame=16) +sw.generate_depth_frame(17, sw.gap_d * 17) +sw.expect(depth_frame=17) + +sw.generate_color_frame(2, sw.gap_c * 2) +sw.expect_nothing() +# We're waiting for D.NE @180, because it's comparable (using min fps of the two) 
-sw.generate_depth_frame( 18, sw.gap_d * 18 ) +sw.generate_depth_frame(18, sw.gap_d * 18) # Now we get both back: -sw.expect( depth_frame = 18, color_frame = 2, nothing_else = True ) +sw.expect(depth_frame=18, color_frame=2, nothing_else=True) # But wait... why match C@200 to D@180 and not wait for D@200?? # If we used the faster FPS of the two, 180!=200: we'd get D18, D19, then (D20,C20) -#expect( depth_frame = 18, nothing_else = True ) -#generate_depth_frame( 19, sw.gap_d * 19 ) -#expect( depth_frame = 19, nothing_else = True ) -#generate_depth_frame( 20, sw.gap_d * 20 ) -#expect( depth_frame = 20, color_frame = 2, nothing_else = True ) +# expect( depth_frame = 18, nothing_else = True ) +# generate_depth_frame( 19, sw.gap_d * 19 ) +# expect( depth_frame = 19, nothing_else = True ) +# generate_depth_frame( 20, sw.gap_d * 20 ) +# expect( depth_frame = 20, color_frame = 2, nothing_else = True ) test.finish() # ############################################################################################# # -test.start( "Stop depth" ) +test.start("Stop depth") -sw.generate_color_frame( 3, sw.gap_c * 3 ) # @300 +sw.generate_color_frame(3, sw.gap_c * 3) # @300 # D.NE is @190, plus 7*gap_d gives a cutout of @260, so the frame shouldn't wait for depth # to arrive, but it does: sw.expect_nothing() # The reason is that it's not using gap_d: it's using gap_c, so cutout is @890: -sw.generate_color_frame( 4, sw.gap_c * 8 ) +sw.generate_color_frame(4, sw.gap_c * 8) sw.expect_nothing() -sw.generate_color_frame( 5, sw.gap_c * 9 ) -sw.expect( color_frame = 3 ) -sw.expect( color_frame = 4 ) -sw.expect( color_frame = 5 ) +sw.generate_color_frame(5, sw.gap_c * 9) +sw.expect(color_frame=3) +sw.expect(color_frame=4) +sw.expect(color_frame=5) sw.expect_nothing() test.finish() diff --git a/unit-tests/syncer/test-ts-eof.py b/unit-tests/syncer/test-ts-eof.py index 41fbeaf6b9..6a6720408a 100644 --- a/unit-tests/syncer/test-ts-eof.py +++ b/unit-tests/syncer/test-ts-eof.py @@ -1,9 +1,12 @@ # 
License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. -import pyrealsense2 as rs -from rspy import log, test import sw +import sys +import tempfile +import os +import pyrealsense2 as rs +from rspy import log, test, repo # The timestamp jumps are closely correlated to the FPS passed to the video streams: @@ -11,16 +14,15 @@ sw.fps_c = sw.fps_d = 60 sw.init() -import tempfile, os -temp_dir = tempfile.TemporaryDirectory( prefix = 'recordings_' ) -filename = os.path.join( temp_dir.name, 'rec.bag' ) -recorder = rs.recorder( filename, sw.device ) +temp_dir = tempfile.TemporaryDirectory(prefix='recordings_') +filename = os.path.join(temp_dir.name, 'rec.bag') +recorder = rs.recorder(filename, sw.device) sw.start() ############################################################################################# # -test.start( "Init" ) +test.start("Init") # It can take a few frames for the syncer to actually produce a matched frameset (it doesn't # know what to match to in the beginning) @@ -30,9 +32,9 @@ # 0 @0 so next expected frame timestamp is at 0+16.67 # 0 @0 # -sw.generate_depth_and_color( frame_number = 0, timestamp = 0 ) -sw.expect( depth_frame = 0 ) # syncer doesn't know about C yet, so releases right away -sw.expect( color_frame = 0, nothing_else = True ) # no hope for a match: D@0 is already out, so it's released +sw.generate_depth_and_color(frame_number=0, timestamp=0) +sw.expect(depth_frame=0) # syncer doesn't know about C yet, so releases right away +sw.expect(color_frame=0, nothing_else=True) # no hope for a match: D@0 is already out, so it's released # # NOTE: if the syncer queue wasn't 100 (see above) then we'd only get the color frame! 
# (it will output D to the queue, then C to the queue, but the queue size is 1 so we lose D) @@ -40,32 +42,32 @@ # 1 @16 # 1 @16 # -sw.generate_depth_and_color( 1, sw.gap_d * 1 ) -sw.expect( depth_frame = 1, color_frame = 1, nothing_else = True ) # frameset 1 +sw.generate_depth_and_color(1, sw.gap_d * 1) +sw.expect(depth_frame=1, color_frame=1, nothing_else=True) # frameset 1 test.finish() # ############################################################################################# # -test.start( "Keep going" ) +test.start("Keep going") # 2 @33 # 2 @33 # -sw.generate_depth_and_color( 2, sw.gap_d * 2 ) -sw.expect( depth_frame = 2, color_frame = 2, nothing_else = True ) # frameset 2 +sw.generate_depth_and_color(2, sw.gap_d * 2) +sw.expect(depth_frame=2, color_frame=2, nothing_else=True) # frameset 2 test.finish() # ############################################################################################# # -test.start( "Stop giving color; nothing output" ) +test.start("Stop giving color; nothing output") # 3 @50 # -sw.generate_depth_frame( 3, sw.gap_d * 3 ) +sw.generate_depth_frame(3, sw.gap_d * 3) -# The depth frame will be kept in the syncer, and never make it out (no matching color frame +# The depth frame will be kept in the syncer, and never make it out (no matching color frame, # and we're not going to push additional frames that would cause it to eventually flush): # sw.expect_nothing() @@ -76,11 +78,11 @@ # ############################################################################################# # -test.start( "Dump the file" ) +test.start("Dump the file") recorder.pause() recorder = None # otherwise the file will be open when we exit -log.d( "filename=", filename ) +log.d("filename=", filename) sw.stop() sw.reset() # @@ -93,33 +95,34 @@ # [Color/1 #2 @33.333333] # [Depth/0 #3 @50.000000] <--- the frame that was "lost" # -from rspy import repo -rs_convert = repo.find_built_exe( 'tools/convert', 'rs-convert' ) + +rs_convert = 
repo.find_built_exe('tools/convert', 'rs-convert') if rs_convert: import subprocess - subprocess.run( [rs_convert, '-i', filename, '-T'], - stdout=None, - stderr=subprocess.STDOUT, - universal_newlines=True, - timeout=10, - check=False ) # don't fail on errors + + subprocess.run([rs_convert, '-i', filename, '-T'], + stdout=None, + stderr=subprocess.STDOUT, + universal_newlines=True, + timeout=10, + check=False) # don't fail on errors else: - log.w( 'no rs-convert was found!' ) - log.d( 'sys.path=\n' + '\n '.join( sys.path ) ) + log.w('no rs-convert was found!') + log.d('sys.path=\n' + '\n '.join(sys.path)) test.finish() # ############################################################################################# # -test.start( "Play it back, with syncer -- lose last frame" ) +test.start("Play it back, with syncer -- lose last frame") -sw.playback( filename ) +sw.playback(filename) sw.start() -sw.expect( depth_frame = 0 ) # syncer doesn't know about color yet -sw.expect( color_frame = 0 ) # less than next expected of D -sw.expect( depth_frame = 1, color_frame = 1 ) -sw.expect( depth_frame = 2, color_frame = 2 ) +sw.expect(depth_frame=0) # syncer doesn't know about color yet +sw.expect(color_frame=0) # less than next expected of D +sw.expect(depth_frame=1, color_frame=1) +sw.expect(depth_frame=2, color_frame=2) # We know there should be another frame in the file: # [Depth/0 #3 @50.000000] @@ -127,10 +130,10 @@ # know that we've reached the EOF. There is a flush when we reach the EOF, but not on the # syncer -- the playback device knows not that its client is a syncer! # -#sw.expect( depth_frame = 3 ) +# sw.expect( depth_frame = 3 ) sw.expect_nothing() # -# There is no API to flush the syncer, but it can easily be added. Or we can implement a +# There is no API to flush the syncer, but it can easily be added. Or we can implement a # special frame type, an "end-of-file frame", which would cause the syncer to flush... 
sw.stop() @@ -140,21 +143,21 @@ # ############################################################################################# # -test.start( "Play it back, without syncer -- and now expect the lost frame" ) +test.start("Play it back, without syncer -- and now expect the lost frame") -sw.playback( filename, use_syncer = False ) +sw.playback(filename, use_syncer=False) sw.start() -sw.expect( depth_frame = 0 ) # none of these is synced (no syncer) -sw.expect( color_frame = 0 ) -sw.expect( depth_frame = 1 ) -sw.expect( color_frame = 1 ) -sw.expect( depth_frame = 2 ) -sw.expect( color_frame = 2 ) +sw.expect(depth_frame=0) # none of these is synced (no syncer) +sw.expect(color_frame=0) +sw.expect(depth_frame=1) +sw.expect(color_frame=1) +sw.expect(depth_frame=2) +sw.expect(color_frame=2) # This line is the difference from the last test: # -sw.expect( depth_frame = 3 ) +sw.expect(depth_frame=3) sw.expect_nothing() sw.stop() diff --git a/unit-tests/syncer/test-ts-same-fps.py b/unit-tests/syncer/test-ts-same-fps.py index 4e73c0b039..b1415ee2c6 100644 --- a/unit-tests/syncer/test-ts-same-fps.py +++ b/unit-tests/syncer/test-ts-same-fps.py @@ -1,54 +1,51 @@ # License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. -import pyrealsense2 as rs from rspy import log, test import sw - # The timestamp jumps are closely correlated to the FPS passed to the video streams: # syncer expects frames to arrive every 1000/FPS milliseconds! 
sw.fps_c = sw.fps_d = 60 sw.init() sw.start() - ############################################################################################# # -test.start( "Wait for framesets" ) +test.start("Wait for framesets") # It can take a few frames for the syncer to actually produce a matched frameset (it doesn't # know what to match to in the beginning) -sw.generate_depth_and_color( frame_number = 0, timestamp = 0 ) -sw.expect( depth_frame = 0 ) # syncer doesn't know about color yet -sw.expect( color_frame = 0, nothing_else = True ) # less than next expected of D +sw.generate_depth_and_color(frame_number=0, timestamp=0) +sw.expect(depth_frame=0) # syncer doesn't know about color yet +sw.expect(color_frame=0, nothing_else=True) # less than next expected of D # # NOTE: if the syncer queue wasn't 100 (see above) then we'd only get the color frame! # -sw.generate_depth_and_color( 1, sw.gap_d * 1 ) -sw.expect( depth_frame = 1, color_frame = 1, nothing_else = True ) # frameset 1 +sw.generate_depth_and_color(1, sw.gap_d * 1) +sw.expect(depth_frame=1, color_frame=1, nothing_else=True) # frameset 1 test.finish() # ############################################################################################# # -test.start( "Keep going" ) +test.start("Keep going") -sw.generate_depth_and_color( 2, sw.gap_d * 2 ) -sw.expect( depth_frame = 2, color_frame = 2, nothing_else = True ) # frameset 2 -sw.generate_depth_and_color( 3, sw.gap_d * 3 ) -sw.generate_depth_and_color( 4, sw.gap_d * 4 ) -sw.expect( depth_frame = 3, color_frame = 3 ) # frameset 3 -sw.expect( depth_frame = 4, color_frame = 4, nothing_else = True ) # frameset 4 +sw.generate_depth_and_color(2, sw.gap_d * 2) +sw.expect(depth_frame=2, color_frame=2, nothing_else=True) # frameset 2 +sw.generate_depth_and_color(3, sw.gap_d * 3) +sw.generate_depth_and_color(4, sw.gap_d * 4) +sw.expect(depth_frame=3, color_frame=3) # frameset 3 +sw.expect(depth_frame=4, color_frame=4, nothing_else=True) # frameset 4 test.finish() # 
############################################################################################# # -test.start( "Stop giving color; wait_for_frames() should throw (after 5s)" ) +test.start("Stop giving color; wait_for_frames() should throw (after 5s)") -sw.generate_depth_frame( 5, sw.gap_d * 5 ) +sw.generate_depth_frame(5, sw.gap_d * 5) # We expect the syncer to try and wait for a Color frame to fit the Depth we just gave it. # The syncer relies on frame inputs to actually run -- and, if we wait for a frame, we're @@ -56,16 +53,16 @@ try: fs = sw.syncer.wait_for_frames() except RuntimeError as e: - test.check_exception( e, RuntimeError, "Frame did not arrive in time!" ) + test.check_exception(e, RuntimeError, "Frame did not arrive in time!") else: - test.info( "Unexpected frameset", fs ) + test.info("Unexpected frameset", fs) test.unreachable() test.finish() # ############################################################################################# # -test.start( "try_wait_for_frames() allows us to keep feeding frames; eventually we get one" ) +test.start("try_wait_for_frames() allows us to keep feeding frames; eventually we get one") # The syncer will not immediately release framesets because it's still waiting for a color frame to # match Depth #10. But, if we advance the timestamp sufficiently, it should eventually release it @@ -74,18 +71,18 @@ # The last color frame we sent was at gap*4, so it's next expected at gap*5 plus a buffer of gap*7 # (see the code in the syncer)... 
so we expect depth frame 5 to be released only when we're past gap*12 -sw.generate_depth_frame( 6, sw.gap_d * 6 ) -sw.expect( nothing_else = True ) -sw.generate_depth_frame( 7, sw.gap_d * 11.5 ) -sw.expect( nothing_else = True ) -sw.generate_depth_frame( 8, sw.gap_d * 12.5 ) +sw.generate_depth_frame(6, sw.gap_d * 6) +sw.expect(nothing_else=True) +sw.generate_depth_frame(7, sw.gap_d * 11.5) +sw.expect(nothing_else=True) +sw.generate_depth_frame(8, sw.gap_d * 12.5) # # We'll get all frames in a burst. Again, if the syncer queue was 1, we'd only get the last! # -sw.expect( depth_frame = 5 ) -sw.expect( depth_frame = 6 ) -sw.expect( depth_frame = 7 ) -sw.expect( depth_frame = 8, nothing_else = True ) +sw.expect(depth_frame=5) +sw.expect(depth_frame=6) +sw.expect(depth_frame=7) +sw.expect(depth_frame=8, nothing_else=True) test.finish() # diff --git a/unit-tests/test-fw-update.py b/unit-tests/test-fw-update.py index 3bd8a39503..f054207749 100644 --- a/unit-tests/test-fw-update.py +++ b/unit-tests/test-fw-update.py @@ -2,79 +2,79 @@ # Copyright(c) 2021 Intel Corporation. All Rights Reserved. 
# we want this test to run first so that all tests run with updated FW versions, so we give it priority 0 -#test:priority 0 -#test:device each(L500*) -#test:device each(D400*) - -import pyrealsense2 as rs, sys, os, subprocess +# test:priority 0 +# test:device each(L500*) +# test:device each(D400*) + +import sys +import os +import subprocess +import re +import platform +import pyrealsense2 as rs from rspy import devices, log, test, file, repo -import re, platform if not devices.acroname: - log.i( "No Acroname library found; skipping device FW update" ) + log.i("No Acroname library found; skipping device FW update") sys.exit(0) # Following will throw if no acroname module is found from rspy import acroname + try: devices.acroname.discover() except acroname.NoneFoundError as e: - log.f( e ) + log.f(e) # Remove acroname -- we're likely running inside run-unit-tests in which case the # acroname hub is likely already connected-to from there and we'll get an error # thrown ('failed to connect to acroname (result=11)'). We do not need it -- just # needed to verify it is available above... 
devices.acroname = None -def send_hardware_monitor_command( device, command ): - command_input = [] # array of uint_8t - - # Parsing the command to array of unsigned integers(size should be < 8bits) - # threw out spaces - command = command.lower() - command = command.replace(" ", "") - current_uint8_t_string = '' - for i in range(0, len(command)): - current_uint8_t_string += command[i] - if len(current_uint8_t_string) >= 2: - command_input.append(int('0x' + current_uint8_t_string, 0)) - current_uint8_t_string = '' - if current_uint8_t_string != '': - command_input.append(int('0x' + current_uint8_t_string, 0)) +def send_hardware_monitor_command(device, command): # byte_index = -1 - raw_result = rs.debug_protocol( device ).send_and_receive_raw_data( command_input ) + raw_result = rs.debug_protocol(device).send_and_receive_raw_data(command) return raw_result[4:] -def get_update_counter( device ): - product_line = device.get_info( rs.camera_info.product_line ) - cmd = None + +def get_update_counter(device): + product_line = device.get_info(rs.camera_info.product_line) + opcode = 0x09 + start_index = 0x30 + size = None if product_line == "L500": - cmd = "14 00 AB CD 09 00 00 00 30 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00" + size = 0x1 elif product_line == "D400": - cmd = "14 00 AB CD 09 00 00 00 30 00 00 00 02 00 00 00 00 00 00 00 00 00 00 00" + size = 0x2 else: - log.f( "Incompatible product line:", product_line ) + log.f("Incompatible product line:", product_line) - counter = send_hardware_monitor_command( device, cmd ) + raw_cmd = rs.debug_protocol(device).build_raw_data(opcode, start_index, size) + counter = send_hardware_monitor_command(device, raw_cmd) return counter[0] -def reset_update_counter( device ): - product_line = device.get_info( rs.camera_info.product_line ) - cmd = None + +def reset_update_counter(device): + product_line = device.get_info(rs.camera_info.product_line) if product_line == "L500": - cmd = "14 00 AB CD 0A 00 00 00 30 00 00 00 01 00 00 
00 00 00 00 00 00 00 00 00 00" + opcode = 0x0A + start_index = 0x30 + size = 0x01 + raw_cmd = rs.debug_protocol(device).build_raw_data(opcode, start_index, size) elif product_line == "D400": - cmd = "14 00 AB CD 86 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" + opcode = 0x86 + raw_cmd = rs.debug_protocol(device).build_raw_data(opcode) else: - log.f( "Incompatible product line:", product_line ) + log.f("Incompatible product line:", product_line) - send_hardware_monitor_command( device, cmd ) + send_hardware_monitor_command(device, raw_cmd) -def find_image_or_exit( product_name, fw_version_regex = r'(\d+\.){3}(\d+)' ): + +def find_image_or_exit(product_name, fw_version_regex=r'(\d+\.){3}(\d+)'): """ Searches for a FW image file for the given camera name and optional version. If none are found, exits with an error! @@ -84,10 +84,10 @@ def find_image_or_exit( product_name, fw_version_regex = r'(\d+\.){3}(\d+)' ): :return: the image file corresponding to product_name and fw_version if exist, otherwise exit """ - pattern = re.compile( r'^Intel RealSense (((\S+?)(\d+))(\S*))' ) - match = pattern.search( product_name ) + pattern = re.compile(r'^Intel RealSense (((\S+?)(\d+))(\S*))') + match = pattern.search(product_name) if not match: - raise RuntimeError( "Failed to parse product name '" + product_name + "'" ) + raise RuntimeError("Failed to parse product name '" + product_name + "'") # For a product 'PR567abc', we want to search, in order, these combinations: # PR567abc @@ -98,19 +98,20 @@ def find_image_or_exit( product_name, fw_version_regex = r'(\d+\.){3}(\d+)' ): # PR3XX # Each of the above, combined with the FW version, should yield an image name like: # PR567aXX_FW_Image-.bin - suffix = 5 # the suffix + suffix = 5  # the suffix for j in range(1, 3): # with suffix, then without start_index, end_index = match.span(j) for i in range(0, len(match.group(suffix))): - pn = product_name[start_index:end_index-i] - image_name = '(^|/)' + pn + i*'X' + 
"_FW_Image-" + fw_version_regex + r'\.bin$' + pn = product_name[start_index:end_index - i] + image_name = '(^|/)' + pn + i * 'X' + "_FW_Image-" + fw_version_regex + r'\.bin$' for image in file.find(repo.root, image_name): - return os.path.join( repo.root, image ) + return os.path.join(repo.root, image) suffix -= 1 # # If we get here, we didn't find any image... global product_line - log.f( "Could not find image file for", product_line ) + log.f("Could not find image file for", product_line) + # find the update tool exe fw_updater_exe = None @@ -118,105 +119,107 @@ def find_image_or_exit( product_name, fw_version_regex = r'(\d+\.){3}(\d+)' ): if platform.system() == 'Windows': fw_updater_exe_regex += r'\.exe' fw_updater_exe_regex += '$' -for tool in file.find( repo.build, fw_updater_exe_regex ): - fw_updater_exe = os.path.join( repo.build, tool ) +for tool in file.find(repo.build, fw_updater_exe_regex): + fw_updater_exe = os.path.join(repo.build, tool) if not fw_updater_exe: - log.f( "Could not find the update tool file (rs-fw-update.exe)" ) + log.f("Could not find the update tool file (rs-fw-update.exe)") -devices.query( monitor_changes = False ) +devices.query(monitor_changes=False) sn_list = devices.all() # acroname should ensure there is always 1 available device -if len( sn_list ) != 1: - log.f( "Expected 1 device, got", len( sn_list ) ) -device = devices.get_first( sn_list ).handle -log.d( 'found:', device ) -product_line = device.get_info( rs.camera_info.product_line ) -product_name = device.get_info( rs.camera_info.name ) -log.d( 'product line:', product_line ) +if len(sn_list) != 1: + log.f("Expected 1 device, got", len(sn_list)) +device = devices.get_first(sn_list).handle +log.d('found:', device) +product_line = device.get_info(rs.camera_info.product_line) +product_name = device.get_info(rs.camera_info.name) +log.d('product line:', product_line) ############################################################################### # -test.start( "Update FW" ) 
+test.start("Update FW") # check if recovery. If so recover recovered = False if device.is_update_device(): - log.d( "recovering device ..." ) + log.d("recovering device ...") try: # TODO: this needs to improve for L535 - image_file = find_image_or_exit( product_name ) + image_file = find_image_or_exit(product_name) cmd = [fw_updater_exe, '-r', '-f', image_file] - log.d( 'running:', cmd ) - subprocess.run( cmd ) + log.d('running:', cmd) + subprocess.run(cmd) recovered = True except Exception as e: test.unexpected_exception() - log.f( "Unexpected error while trying to recover device:", e ) + log.f("Unexpected error while trying to recover device:", e) else: - devices.query( monitor_changes = False ) - device = devices.get_first( devices.all() ).handle + devices.query(monitor_changes=False) + device = devices.get_first(devices.all()).handle -current_fw_version = repo.pretty_fw_version( device.get_info( rs.camera_info.firmware_version )) -log.d( 'FW version:', current_fw_version ) -bundled_fw_version = repo.pretty_fw_version( device.get_info( rs.camera_info.recommended_firmware_version ) ) -log.d( 'bundled FW version:', bundled_fw_version ) +current_fw_version = repo.pretty_fw_version(device.get_info(rs.camera_info.firmware_version)) +log.d('FW version:', current_fw_version) +bundled_fw_version = repo.pretty_fw_version(device.get_info(rs.camera_info.recommended_firmware_version)) +log.d('bundled FW version:', bundled_fw_version) -def compare_fw_versions( v1, v2 ): + +def compare_fw_versions(v1, v2): """ :param v1: left FW version :param v2: right FW version :return: 1 if v1 > v2; -1 is v1 < v2; 0 if they're equal """ - v1_list = v1.split( '.' ) - v2_list = v2.split( '.' 
) + v1_list = v1.split('.') + v2_list = v2.split('.') if len(v1_list) != 4: - raise RuntimeError( "FW version (left) '" + v1 + "' is invalid" ) + raise RuntimeError("FW version (left) '" + v1 + "' is invalid") if len(v2_list) != 4: - raise RuntimeError( "FW version (right) '" + v2 + "' is invalid" ) - for n1, n2 in zip( v1_list, v2_list ): + raise RuntimeError("FW version (right) '" + v2 + "' is invalid") + for n1, n2 in zip(v1_list, v2_list): if int(n1) > int(n2): return 1 if int(n1) < int(n2): return -1 return 0 -if compare_fw_versions( current_fw_version, bundled_fw_version ) == 0: + +if compare_fw_versions(current_fw_version, bundled_fw_version) == 0: # Current is same as bundled if recovered or test.context != 'nightly': # In nightly, we always update; otherwise we try to save time, so do not do anything! - log.d( 'versions are same; skipping FW update' ) + log.d('versions are same; skipping FW update') test.finish() test.print_results_and_exit() else: # It is expected that, post-recovery, the FW versions will be the same - test.check( not recovered, abort_if_failed = True ) + test.check(not recovered, abort_if_failed=True) -update_counter = get_update_counter( device ) -log.d( 'update counter:', update_counter ) +update_counter = get_update_counter(device) +log.d('update counter:', update_counter) if update_counter >= 19: - log.d( 'resetting update counter' ) - reset_update_counter( device ) + log.d('resetting update counter') + reset_update_counter(device) update_counter = 0 -image_file = find_image_or_exit(product_name, re.escape( bundled_fw_version )) +image_file = find_image_or_exit(product_name, re.escape(bundled_fw_version)) # finding file containing image for FW update cmd = [fw_updater_exe, '-f', image_file] -log.d( 'running:', cmd ) +log.d('running:', cmd) sys.stdout.flush() -subprocess.run( cmd ) # may throw +subprocess.run(cmd) # may throw # make sure update worked -devices.query( monitor_changes = False ) +devices.query(monitor_changes=False) 
sn_list = devices.all() -device = devices.get_first( sn_list ).handle -current_fw_version = repo.pretty_fw_version( device.get_info( rs.camera_info.firmware_version )) -test.check_equal( current_fw_version, bundled_fw_version ) -new_update_counter = get_update_counter( device ) +device = devices.get_first(sn_list).handle +current_fw_version = repo.pretty_fw_version(device.get_info(rs.camera_info.firmware_version)) +test.check_equal(current_fw_version, bundled_fw_version) +new_update_counter = get_update_counter(device) # According to FW: "update counter zeros if you load newer FW than (ever) before" if new_update_counter > 0: - test.check_equal( new_update_counter, update_counter + 1 ) + test.check_equal(new_update_counter, update_counter + 1) test.finish() # diff --git a/unit-tests/unit-test-config.py b/unit-tests/unit-test-config.py index 9694be0126..52e7204f1a 100644 --- a/unit-tests/unit-test-config.py +++ b/unit-tests/unit-test-config.py @@ -14,30 +14,36 @@ # process and so individual tests cannot affect others except through hardware. # -import sys, os, subprocess, locale, re, getopt +import sys +import os +import re +import getopt from glob import glob -current_dir = os.path.dirname( os.path.abspath( __file__ ) ) -sys.path.append( current_dir + os.sep + "py" ) +current_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(current_dir + os.sep + "py") from rspy import file, repo, libci, log + def usage(): ourname = os.path.basename(sys.argv[0]) - print( 'Syntax: ' + ourname + ' [options] ' ) - print( ' build unit-testing framework for the tree in $dir' ) - print( ' -r, --regex configure all tests that fit the following regular expression' ) - print( ' -t, --tag configure all tests with the following tag. If used multiple times runs all tests matching' ) - print( ' all tags. e.g. 
-t tag1 -t tag2 will run tests who have both tag1 and tag2' ) - print( ' tests automatically get tagged with \'exe\' or \'py\' and based on their location' ) - print( ' inside unit-tests/, e.g. unit-tests/func/test-hdr.py gets [func, py]' ) - print( ' --list-tags print out all available tags. This option will not run any tests' ) - print( ' --list-tests print out all available tests. This option will not run any tests' ) - print( ' if both list-tags and list-tests are specified each test will be printed along' ) - print( ' with what tags it has' ) - print( ' --context The context to use for test configuration' ) + print('Syntax: ' + ourname + ' [options] ') + print(' build unit-testing framework for the tree in $dir') + print(' -r, --regex configure all tests that fit the following regular expression') + print( + ' -t, --tag configure all tests with the following tag. If used multiple times runs all tests matching') + print(' all tags. e.g. -t tag1 -t tag2 will run tests who have both tag1 and tag2') + print(' tests automatically get tagged with \'exe\' or \'py\' and based on their location') + print(' inside unit-tests/, e.g. unit-tests/func/test-hdr.py gets [func, py]') + print(' --list-tags print out all available tags. This option will not run any tests') + print(' --list-tests print out all available tests. 
This option will not run any tests') + print(' if both list-tags and list-tests are specified each test will be printed along') + print(' with what tags it has') + print(' --context The context to use for test configuration') sys.exit(2) + regex = None required_tags = [] list_tags = False @@ -45,10 +51,10 @@ def usage(): context = None # parse command-line: try: - opts, args = getopt.getopt( sys.argv[1:], 'hr:t:', - longopts=['help', 'regex=', 'tag=', 'list-tags', 'list-tests', 'context='] ) + opts, args = getopt.getopt(sys.argv[1:], 'hr:t:', + longopts=['help', 'regex=', 'tag=', 'list-tags', 'list-tests', 'context=']) except getopt.GetoptError as err: - log.e( err ) # something like "option -a not recognized" + log.e(err) # something like "option -a not recognized" usage() for opt, arg in opts: if opt in ('-h', '--help'): @@ -56,7 +62,7 @@ def usage(): elif opt in ('-r', '--regex'): regex = arg elif opt in ('-t', '--tag'): - required_tags.append( arg ) + required_tags.append(arg) elif opt == '--list-tags': list_tags = True elif opt == '--list-tests': @@ -64,23 +70,24 @@ def usage(): elif opt == '--context': context = arg -if len( args ) != 2: +if len(args) != 2: usage() -dir=args[0] -builddir=args[1] -if not os.path.isdir( dir ) or not os.path.isdir( builddir ): +dir = args[0] +builddir = args[1] +if not os.path.isdir(dir) or not os.path.isdir(builddir): usage() # We have to stick to Unix conventions because CMake on Windows is fubar... 
-root = repo.root.replace( '\\' , '/' ) +root = repo.root.replace('\\', '/') src = root + '/src' -def generate_cmake( builddir, testdir, testname, filelist, custom_main ): + +def generate_cmake(builddir, testdir, testname, filelist, custom_main): makefile = builddir + '/' + testdir + '/CMakeLists.txt' - log.d( ' creating:', makefile ) - handle = open( makefile, 'w' ) - filelist = '\n '.join( filelist ) - handle.write( ''' + log.d(' creating:', makefile) + handle = open(makefile, 'w') + filelist = '\n '.join(filelist) + handle.write(''' # This file is automatically generated!! # Do not modify or your changes will be lost! @@ -90,23 +97,23 @@ def generate_cmake( builddir, testdir, testname, filelist, custom_main ): set( SRC_FILES ''' + filelist + ''' ) add_executable( ''' + testname + ''' ${SRC_FILES} ) -source_group( "Common Files" FILES ${ELPP_FILES} ${CATCH_FILES} ''' + dir + '''/test.cpp''' ) +source_group( "Common Files" FILES ${ELPP_FILES} ${CATCH_FILES} ''' + dir + '''/test.cpp''') if not custom_main: - handle.write( ' ' + dir + '/unit-test-default-main.cpp' ) - handle.write( ''' ) + handle.write(' ' + dir + '/unit-test-default-main.cpp') + handle.write(''' ) set_property(TARGET ''' + testname + ''' PROPERTY CXX_STANDARD 11) target_link_libraries( ''' + testname + ''' ${DEPENDENCIES}) -set_target_properties( ''' + testname + ''' PROPERTIES FOLDER "Unit-Tests/''' + os.path.dirname( testdir ) + '''" ) +set_target_properties( ''' + testname + ''' PROPERTIES FOLDER "Unit-Tests/''' + os.path.dirname(testdir) + '''" ) # Add the repo root directory (so includes into src/ will be specific: ) target_include_directories(''' + testname + ''' PRIVATE ''' + root + ''') -''' ) +''') handle.close() -def find_include( include, relative_to ): +def find_include(include, relative_to): """ Try to match the include to an existing file. 
@@ -115,29 +122,31 @@ def find_include( include, relative_to ): :return: the normalized & absolute file path, if found -- otherwise, None """ if include: - if not os.path.isabs( include ): - include = os.path.normpath( relative_to + '/' + include ) - include = include.replace( '\\', '/' ) - if os.path.exists( include ): + if not os.path.isabs(include): + include = os.path.normpath(relative_to + '/' + include) + include = include.replace('\\', '/') + if os.path.exists(include): return include standard_include_dirs = [ - os.path.join( root, 'include' ), + os.path.join(root, 'include'), root - ] -def find_include_in_dirs( include ): +] + + +def find_include_in_dirs(include): """ Search for the given include in all the standard include directories """ global include_dirs for include_dir in standard_include_dirs: - path = find_include( include, include_dir ) + path = find_include(include, include_dir) if path: return path -def find_includes( filepath, filelist = set() ): +def find_includes(filepath, filelist=set()): """ Recursively searches a .cpp file for #include directives and returns a set of all of them. 
@@ -146,140 +155,146 @@ def find_includes( filepath, filelist = set() ): filedir = os.path.dirname(filepath) try: log.debug_indent() - for include_line in file.grep( r'^\s*#\s*include\s+("(.*)"|<(.*)>)\s*$', filepath ): + for include_line in file.grep(r'^\s*#\s*include\s+("(.*)"|<(.*)>)\s*$', filepath): m = include_line['match'] index = include_line['index'] - include = find_include( m.group(2), filedir ) or find_include_in_dirs( m.group(2) ) or find_include_in_dirs( m.group(3) ) + include = find_include(m.group(2), filedir) or find_include_in_dirs(m.group(2)) or find_include_in_dirs( + m.group(3)) if include: if include in filelist: - log.d( m.group(0), '->', include, '(already processed)' ) + log.d(m.group(0), '->', include, '(already processed)') else: - log.d( m.group(0), '->', include ) - filelist.add( include ) - filelist = find_includes( include, filelist ) + log.d(m.group(0), '->', include) + filelist.add(include) + filelist = find_includes(include, filelist) else: - log.d( 'not found:', m.group(0) ) + log.d('not found:', m.group(0)) finally: log.debug_unindent() return filelist -def process_cpp( dir, builddir ): + +def process_cpp(dir, builddir): global regex, required_tags, list_only, available_tags, tests_and_tags found = [] shareds = [] statics = [] if regex: - pattern = re.compile( regex ) - log.d( 'looking for C++ files in:', dir ) - for f in file.find( dir, '(^|/)test-.*\.cpp$' ): - testdir = os.path.splitext( f )[0] # "log/internal/test-all" <- "log/internal/test-all.cpp" - testparent = os.path.dirname(testdir) # "log/internal" + pattern = re.compile(regex) + log.d('looking for C++ files in:', dir) + for f in file.find(dir, '(^|/)test-.*\.cpp$'): + testdir = os.path.splitext(f)[0] # "log/internal/test-all" <- "log/internal/test-all.cpp" + testparent = os.path.dirname(testdir) # "log/internal" # We need the project name unique: keep the path but make it nicer: if testparent: - testname = 'test-' + testparent.replace( '/', '-' ) + '-' + 
os.path.basename( testdir )[ - 5:] # "test-log-internal-all" + testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(testdir)[ + 5:] # "test-log-internal-all" else: testname = testdir # no parent folder so we get "test-all" - if regex and not pattern.search( testname ): + if regex and not pattern.search(testname): continue - log.d( '... found:', f ) + log.d('... found:', f) log.debug_indent() try: if required_tags or list_tags: - config = libci.TestConfigFromCpp( dir + os.sep + f, context ) - if not all( tag in config.tags for tag in required_tags ): + config = libci.TestConfigFromCpp(dir + os.sep + f, context) + if not all(tag in config.tags for tag in required_tags): continue - available_tags.update( config.tags ) + available_tags.update(config.tags) if list_tests: - tests_and_tags[ testname ] = config.tags + tests_and_tags[testname] = config.tags if testname not in tests_and_tags: tests_and_tags[testname] = None # Build the list of files we want in the project: # At a minimum, we have the original file, plus any common files - filelist = [ dir + '/' + f, '${ELPP_FILES}', '${CATCH_FILES}' ] + filelist = [dir + '/' + f, '${ELPP_FILES}', '${CATCH_FILES}'] # Add any "" includes specified in the .cpp that we can find - includes = find_includes( dir + '/' + f ) + includes = find_includes(dir + '/' + f) # Add any files explicitly listed in the .cpp itself, like this: # //#cmake:add-file # Any files listed are relative to $dir shared = False static = False custom_main = False - for cmake_directive in file.grep( '^//#cmake:\s*', dir + '/' + f ): + for cmake_directive in file.grep('^//#cmake:\s*', dir + '/' + f): m = cmake_directive['match'] index = cmake_directive['index'] cmd, *rest = cmake_directive['line'][m.end():].split() if cmd == 'add-file': for additional_file in rest: files = additional_file - if not os.path.isabs( additional_file ): + if not os.path.isabs(additional_file): files = dir + '/' + testparent + '/' + additional_file - files = 
glob( files ) + files = glob(files) if not files: - log.e( f + '+' + str(index) + ': no files match "' + additional_file + '"' ) + log.e(f + '+' + str(index) + ': no files match "' + additional_file + '"') for abs_file in files: - abs_file = os.path.normpath( abs_file ) - abs_file = abs_file.replace( '\\', '/' ) - if not os.path.exists( abs_file ): - log.e( f + '+' + str(index) + ': file not found "' + additional_file + '"' ) - log.d( 'add file:', abs_file ) - filelist.append( abs_file ) - if( os.path.splitext( abs_file )[0] == 'cpp' ): + abs_file = os.path.normpath(abs_file) + abs_file = abs_file.replace('\\', '/') + if not os.path.exists(abs_file): + log.e(f + '+' + str(index) + ': file not found "' + additional_file + '"') + log.d('add file:', abs_file) + filelist.append(abs_file) + if (os.path.splitext(abs_file)[0] == 'cpp'): # Add any "" includes specified in the .cpp that we can find - includes |= find_includes( abs_file ) + includes |= find_includes(abs_file) elif cmd == 'static!': if len(rest): - log.e( f + '+' + str(index) + ': unexpected arguments past \'' + cmd + '\'' ) + log.e(f + '+' + str(index) + ': unexpected arguments past \'' + cmd + '\'') elif shared: - log.e( f + '+' + str(index) + ': \'' + cmd + '\' mutually exclusive with \'shared!\'' ) + log.e(f + '+' + str(index) + ': \'' + cmd + '\' mutually exclusive with \'shared!\'') else: - log.d( 'static!' ) + log.d('static!') static = True elif cmd == 'shared!': if len(rest): - log.e( f + '+' + str(index) + ': unexpected arguments past \'' + cmd + '\'' ) + log.e(f + '+' + str(index) + ': unexpected arguments past \'' + cmd + '\'') elif static: - log.e( f + '+' + str(index) + ': \'' + cmd + '\' mutually exclusive with \'static!\'' ) + log.e(f + '+' + str(index) + ': \'' + cmd + '\' mutually exclusive with \'static!\'') else: - log.d( 'shared!' 
) + log.d('shared!') shared = True elif cmd == 'custom-main': custom_main = True else: - log.e( f + '+' + str(index) + ': unknown cmd \'' + cmd + '\' (should be \'add-file\', \'static!\', or \'shared!\')' ) + log.e(f + '+' + str( + index) + ': unknown cmd \'' + cmd + '\' (should be \'add-file\', \'static!\', or \'shared!\')') for include in includes: - filelist.append( include ) + filelist.append(include) # all tests use the common test.cpp file - filelist.append( root + "/unit-tests/test.cpp" ) + filelist.append(root + "/unit-tests/test.cpp") # 'cmake:custom-main' indicates that the test is defining its own main() function. # If not specified we use a default main() which lives in its own .cpp: if not custom_main: - filelist.append( root + "/unit-tests/unit-test-default-main.cpp" ) + filelist.append(root + "/unit-tests/unit-test-default-main.cpp") if list_only: continue # Each CMakeLists.txt sits in its own directory - os.makedirs( builddir + '/' + testdir, exist_ok=True ) # "build/log/internal/test-all" - generate_cmake( builddir, testdir, testname, filelist, custom_main ) + os.makedirs(builddir + '/' + testdir, exist_ok=True) # "build/log/internal/test-all" + generate_cmake(builddir, testdir, testname, filelist, custom_main) if static: - statics.append( testdir ) + statics.append(testdir) elif shared: - shareds.append( testdir ) + shareds.append(testdir) else: - found.append( testdir ) + found.append(testdir) finally: log.debug_unindent() return found, shareds, statics -def process_py( dir, builddir ): + + +def process_py(dir, builddir): # TODO - return [],[],[] + return [], [], [] + list_only = list_tags or list_tests available_tags = set() @@ -287,39 +302,39 @@ def process_py( dir, builddir ): normal_tests = [] shared_tests = [] static_tests = [] -n,sh,st = process_cpp( dir, builddir ) +n, sh, st = process_cpp(dir, builddir) if list_only: if list_tags and list_tests: - for t in sorted( tests_and_tags.keys() ): - print( t, "has tags:", ' '.join( 
tests_and_tags[t] ) ) + for t in sorted(tests_and_tags.keys()): + print(t, "has tags:", ' '.join(tests_and_tags[t])) # elif list_tags: - for t in sorted( list( available_tags ) ): - print( t ) + for t in sorted(list(available_tags)): + print(t) # elif list_tests: - for t in sorted( tests_and_tags.keys() ): - print( t ) - sys.exit( 0 ) - -normal_tests.extend( n ) -shared_tests.extend( sh ) -static_tests.extend( st ) -n,sh,st = process_py( dir, builddir ) -normal_tests.extend( n ) -shared_tests.extend( sh ) -static_tests.extend( st ) + for t in sorted(tests_and_tags.keys()): + print(t) + sys.exit(0) + +normal_tests.extend(n) +shared_tests.extend(sh) +static_tests.extend(st) +n, sh, st = process_py(dir, builddir) +normal_tests.extend(n) +shared_tests.extend(sh) +static_tests.extend(st) cmakefile = builddir + '/CMakeLists.txt' -name = os.path.basename( os.path.realpath( dir )) -log.d( 'Creating "' + name + '" project in', cmakefile ) +name = os.path.basename(os.path.realpath(dir)) +log.d('Creating "' + name + '" project in', cmakefile) -handle = open( cmakefile, 'w' ) -handle.write( ''' +handle = open(cmakefile, 'w') +handle.write(''' # We make use of ELPP (EasyLogging++): -include_directories( ''' + dir + '''/../third-party/easyloggingpp/src ) +include_directories( ''' + dir + '''/../third-party/easyloggingpp/src ) set( ELPP_FILES ''' + dir + '''/../third-party/easyloggingpp/src/easylogging++.cc ''' + dir + '''/../third-party/easyloggingpp/src/easylogging++.h @@ -328,35 +343,36 @@ def process_py( dir, builddir ): ''' + dir + '''/catch/catch.hpp ) -''' ) +''') n_tests = 0 for sdir in normal_tests: - handle.write( 'add_subdirectory( ' + sdir + ' )\n' ) - log.d( '... including:', sdir ) + handle.write('add_subdirectory( ' + sdir + ' )\n') + log.d('... including:', sdir) n_tests += 1 if len(shared_tests): - handle.write( 'if(NOT ${BUILD_SHARED_LIBS})\n' ) - handle.write( ' message( INFO "' + str(len(shared_tests)) + ' shared lib unit-tests will be skipped. 
Check BUILD_SHARED_LIBS to run them..." )\n' ) - handle.write( 'else()\n' ) + handle.write('if(NOT ${BUILD_SHARED_LIBS})\n') + handle.write(' message( INFO "' + str( + len(shared_tests)) + ' shared lib unit-tests will be skipped. Check BUILD_SHARED_LIBS to run them..." )\n') + handle.write('else()\n') for test in shared_tests: - handle.write( ' add_subdirectory( ' + test + ' )\n' ) - log.d( '... including:', sdir ) + handle.write(' add_subdirectory( ' + test + ' )\n') + log.d('... including:', sdir) n_tests += 1 - handle.write( 'endif()\n' ) + handle.write('endif()\n') if len(static_tests): - handle.write( 'if(${BUILD_SHARED_LIBS})\n' ) - handle.write( ' message( INFO "' + str(len(static_tests)) + ' static lib unit-tests will be skipped. Uncheck BUILD_SHARED_LIBS to run them..." )\n' ) - handle.write( 'else()\n' ) + handle.write('if(${BUILD_SHARED_LIBS})\n') + handle.write(' message( INFO "' + str( + len(static_tests)) + ' static lib unit-tests will be skipped. Uncheck BUILD_SHARED_LIBS to run them..." )\n') + handle.write('else()\n') for test in static_tests: - handle.write( ' add_subdirectory( ' + test + ' )\n' ) - log.d( '... including:', sdir ) + handle.write(' add_subdirectory( ' + test + ' )\n') + log.d('... 
including:', sdir) n_tests += 1 - handle.write( 'endif()\n' ) + handle.write('endif()\n') handle.close() -print( 'Generated ' + str(n_tests) + ' unit-tests' ) +print('Generated ' + str(n_tests) + ' unit-tests') if log.n_errors(): sys.exit(1) sys.exit(0) - diff --git a/unit-tests/unit-tests-internal.cpp b/unit-tests/unit-tests-internal.cpp index 23405dd3a5..a7df8fddb4 100644 --- a/unit-tests/unit-tests-internal.cpp +++ b/unit-tests/unit-tests-internal.cpp @@ -1944,14 +1944,12 @@ void metadata_verification(const std::vector& da ////serialize_json void trigger_error(const rs2::device& dev, int num) { - std::vector<uint8_t> raw_data(24, 0); - raw_data[0] = 0x14; - raw_data[2] = 0xab; - raw_data[3] = 0xcd; - raw_data[4] = 0x4d; - raw_data[8] = num; + int opcode = 0x4d; if (auto debug = dev.as<debug_protocol>()) + { + auto raw_data = debug.build_raw_data(opcode, num); debug.send_and_receive_raw_data(raw_data); + } } diff --git a/unit-tests/unit-tests-live.cpp b/unit-tests/unit-tests-live.cpp index ce4321c918..f55c7c4eb5 100644 --- a/unit-tests/unit-tests-live.cpp +++ b/unit-tests/unit-tests-live.cpp @@ -2035,14 +2035,12 @@ void metadata_verification(const std::vector& da ////serialize_json void trigger_error(const rs2::device& dev, int num) { - std::vector<uint8_t> raw_data(24, 0); - raw_data[0] = 0x14; - raw_data[2] = 0xab; - raw_data[3] = 0xcd; - raw_data[4] = 0x4d; - raw_data[8] = num; + int opcode = 0x4d; if (auto debug = dev.as<debug_protocol>()) + { + auto raw_data = debug.build_raw_data(opcode, num); debug.send_and_receive_raw_data(raw_data); + } } diff --git a/wrappers/csharp/Intel.RealSense/Devices/DebugDevice.cs b/wrappers/csharp/Intel.RealSense/Devices/DebugDevice.cs index 947a8dd1c3..2f093455ff 100644 --- a/wrappers/csharp/Intel.RealSense/Devices/DebugDevice.cs +++ b/wrappers/csharp/Intel.RealSense/Devices/DebugDevice.cs @@ -30,6 +30,39 @@ public static DebugDevice FromDevice(Device dev) return Device.Create(dev.Handle); } + + public byte[] BuildRawData(UInt32 opcode, UInt32 param1 = 0, UInt32 param2 = 0, 
UInt32 param3 = 0, + UInt32 param4 = 0, byte[] data = null) + { + IntPtr nativeBytes = IntPtr.Zero; + uint dataLength = 0; + try + { + object error; + if (data != null) { + nativeBytes = Marshal.AllocHGlobal(data.Length); + Marshal.Copy(data, 0, nativeBytes, data.Length); + dataLength = (uint)data.Length; + } + + IntPtr rawDataBuffer = NativeMethods.rs2_build_raw_data(Handle, opcode, param1, param2, param3, + param4, nativeBytes, dataLength, out error); + + IntPtr start = NativeMethods.rs2_get_raw_data(rawDataBuffer, out error); + int size = NativeMethods.rs2_get_raw_data_size(rawDataBuffer, out error); + + byte[] managedBytes = new byte[size]; + Marshal.Copy(start, managedBytes, 0, size); + NativeMethods.rs2_delete_raw_data(rawDataBuffer); + + return managedBytes; + } + finally + { + Marshal.FreeHGlobal(nativeBytes); + } + } + public byte[] SendReceiveRawData(byte[] command_bytes) { IntPtr nativeBytes = IntPtr.Zero; diff --git a/wrappers/csharp/Intel.RealSense/NativeMethods.cs b/wrappers/csharp/Intel.RealSense/NativeMethods.cs index bf7f5e7190..51bcd1a95e 100644 --- a/wrappers/csharp/Intel.RealSense/NativeMethods.cs +++ b/wrappers/csharp/Intel.RealSense/NativeMethods.cs @@ -505,6 +505,9 @@ internal static MemCpyDelegate GetMethod() [DllImport(dllName, CallingConvention = CallingConvention.Cdecl)] internal static extern IntPtr rs2_send_and_receive_raw_data(IntPtr device, IntPtr raw_data_to_send, uint size_of_raw_data_to_send, [MarshalAs(UnmanagedType.CustomMarshaler, MarshalTypeRef = typeof(ErrorMarshaler))] out object error); + [DllImport(dllName, CallingConvention = CallingConvention.Cdecl)] + internal static extern IntPtr rs2_build_raw_data(IntPtr device, uint opcode, uint param1, uint param2, uint param3, uint param4, IntPtr raw_data_to_send, uint size_of_raw_data_to_send, [MarshalAs(UnmanagedType.CustomMarshaler, MarshalTypeRef = typeof(ErrorMarshaler))] out object error); + [DllImport(dllName, CallingConvention = CallingConvention.Cdecl)] internal static 
extern int rs2_is_device_extendable_to(IntPtr device, Extension extension, [MarshalAs(UnmanagedType.CustomMarshaler, MarshalTypeRef = typeof(ErrorMarshaler))] out object error); @@ -700,11 +703,11 @@ internal static MemCpyDelegate GetMethod() internal static extern IntPtr rs2_run_tare_calibration(IntPtr dev, float ground_truth_mm, [MarshalAs(UnmanagedType.LPStr)] string json_content, int content_size, [MarshalAs(UnmanagedType.FunctionPtr)] rs2_update_progress_callback callback, IntPtr client_data, int timeout_ms, [MarshalAs(UnmanagedType.CustomMarshaler, MarshalTypeRef = typeof(ErrorMarshaler))] out object error); [DllImport(dllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rs2_run_focal_length_calibration(IntPtr dev, IntPtr left_queue, IntPtr right_queue, float target_width_mm, float target_height_mm, int adjust_both_sides, out float ratio, out float angle, + internal static extern IntPtr rs2_run_focal_length_calibration(IntPtr dev, IntPtr left_queue, IntPtr right_queue, float target_width_mm, float target_height_mm, int adjust_both_sides, out float ratio, out float angle, [MarshalAs(UnmanagedType.FunctionPtr)] rs2_update_progress_callback callback, [MarshalAs(UnmanagedType.CustomMarshaler, MarshalTypeRef = typeof(ErrorMarshaler))] out object error); [DllImport(dllName, CallingConvention = CallingConvention.Cdecl)] - internal static extern IntPtr rs2_run_uv_map_calibration(IntPtr dev, IntPtr left_queue, IntPtr rgb_queue, IntPtr depth_queue, int px_py_only, out float ratio, out float angle, + internal static extern IntPtr rs2_run_uv_map_calibration(IntPtr dev, IntPtr left_queue, IntPtr rgb_queue, IntPtr depth_queue, int px_py_only, out float ratio, out float angle, [MarshalAs(UnmanagedType.FunctionPtr)] rs2_update_progress_callback callback, [MarshalAs(UnmanagedType.CustomMarshaler, MarshalTypeRef = typeof(ErrorMarshaler))] out object error); [DllImport(dllName, CallingConvention = CallingConvention.Cdecl)] diff --git 
a/wrappers/python/pyrs_device.cpp b/wrappers/python/pyrs_device.cpp index a8dd90366a..f4dc21b626 100644 --- a/wrappers/python/pyrs_device.cpp +++ b/wrappers/python/pyrs_device.cpp @@ -226,8 +226,10 @@ void init_device(py::module &m) { py::class_<rs2::debug_protocol> debug_protocol(m, "debug_protocol"); // No docstring in C++ debug_protocol.def(py::init<rs2::device>()) + .def("build_raw_data", &rs2::debug_protocol::build_raw_data, "opcode"_a, "param1"_a = 0, + "param2"_a = 0, "param3"_a = 0, "param4"_a = 0, "data"_a = std::vector<uint8_t>()) .def("send_and_receive_raw_data", &rs2::debug_protocol::send_and_receive_raw_data, - "input"_a); // No docstring in C++ + "input"_a); // No docstring in C++ py::class_<rs2::device_list> device_list(m, "device_list"); // No docstring in C++ device_list.def(py::init<>())