From 57ff355527117bb2b29de0d09af7421dd18a03d3 Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Sat, 5 Sep 2020 15:06:15 -0700 Subject: [PATCH 01/34] perfect lane detection config --- configs/perfect_lane_detection.conf | 19 +++++++++++++++++++ pylot/map/hd_map.py | 6 ++++-- 2 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 configs/perfect_lane_detection.conf diff --git a/configs/perfect_lane_detection.conf b/configs/perfect_lane_detection.conf new file mode 100644 index 000000000..2881ddcbc --- /dev/null +++ b/configs/perfect_lane_detection.conf @@ -0,0 +1,19 @@ +--perfect_lane_detection +###### Control config ##### +--control=carla_auto_pilot +######### Carla config ######### +--carla_num_people=50 +--carla_num_vehicles=20 +# Resolution required by lanenet. +--camera_image_width=1280 +--camera_image_height=720 +# Resolution required by Canny edge lane detetection. +# --camera_image_width=800 +# --camera_image_height=600 +######### Logging config ######### +--log_file_name=pylot.log +--csv_log_file_name=pylot.csv +--v=1 +######### Other config ######### +--visualize_rgb_camera +--visualize_detected_lanes \ No newline at end of file diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index fc44440af..fa329b082 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -319,7 +319,9 @@ def _must_obey_american_traffic_light(self, ego_transform: Transform, def get_lane(self, location: Location, waypoint_precision: float = 0.05): lane_waypoints = [] - next_wp = [self._get_waypoint(location)] + next_wp = [self._get_waypoint(location, + project_to_road=False, + lane_type=carla.LaneType.Any)] while len(next_wp) == 1: lane_waypoints.append(next_wp[0]) @@ -334,7 +336,7 @@ def get_lane(self, location: Location, waypoint_precision: float = 0.05): self._lateral_shift(w.transform, w.lane_width * 0.5) for w in lane_waypoints ] - return Lane(left_markings, right_markings) + return Lane(0, left_markings, right_markings) def get_left_lane(self, location: Location): waypoint = self._get_waypoint(location, project_to_road=False) From fd40877a345e0a1181bb2c42ec290d1080c4b5c9 Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Sat, 5 Sep 2020 15:15:22 -0700 Subject: [PATCH 02/34] python formatting --- pylot/map/hd_map.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index fa329b082..35d12ce6f 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -319,9 +319,11 @@ def _must_obey_american_traffic_light(self, ego_transform: Transform, def get_lane(self, location: Location, waypoint_precision: float = 0.05): lane_waypoints = [] - next_wp = [self._get_waypoint(location, - project_to_road=False, - lane_type=carla.LaneType.Any)] + next_wp = [ + self._get_waypoint(location, + project_to_road=False, + lane_type=carla.LaneType.Any) + ] while len(next_wp) == 1: lane_waypoints.append(next_wp[0]) From c26429081546d62980a6d8b9a2a92961951bad98 Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Fri, 18 Sep 2020 23:47:31 -0700 Subject: [PATCH 03/34] add all lane --- pylot/map/hd_map.py | 46 +++++++++++++++---- .../perfect_lane_detector_operator.py | 5 +- 2 files changed, 41 insertions(+), 10 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index 35d12ce6f..78885b101 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -317,7 +317,7 @@ def _must_obey_american_traffic_light(self, ego_transform: Transform, else: return (False, None) - def get_lane(self, location: Location, waypoint_precision: float = 0.05): 
+ def get_lane(self, location, lane_id=0, waypoint_precision=0.05): lane_waypoints = [] next_wp = [ self._get_waypoint(location, @@ -338,24 +338,54 @@ def get_lane(self, location: Location, waypoint_precision: float = 0.05): self._lateral_shift(w.transform, w.lane_width * 0.5) for w in lane_waypoints ] - return Lane(0, left_markings, right_markings) + return Lane(lane_id, left_markings, right_markings) - def get_left_lane(self, location: Location): - waypoint = self._get_waypoint(location, project_to_road=False) + def get_left_lane(self, location): + waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) if waypoint: left_lane_waypoint = waypoint.get_left_lane() if left_lane_waypoint: - return Transform.from_carla_transform(waypoint.transform) + return pylot.utils.Transform.from_carla_transform( + left_lane_waypoint.transform) return None - def get_right_lane(self, location: Location): - waypoint = self._get_waypoint(location, project_to_road=False) + def get_right_lane(self, location): + waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) if waypoint: right_lane_waypoint = waypoint.get_right_lane() if right_lane_waypoint: - return Transform.from_carla_transform(waypoint.transform) + return pylot.utils.Transform.from_carla_transform( + right_lane_waypoint.transform) return None + def get_all_lanes(self, location): + lanes = [self.get_lane(location)] + + waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) + if waypoint: + wp_left = waypoint.get_left_lane() + w_rotation = waypoint.transform.rotation + while wp_left and wp_left.lane_type == carla.LaneType.Driving: + camera_transform = pylot.utils.Transform.from_carla_transform( + wp_left.transform) + lanes.append(self.get_lane(camera_transform.location, lane_id=wp_left.lane_id)) + if w_rotation == wp_left.transform.rotation: + wp_left = wp_left.get_left_lane() + else: + wp_left = wp_left.get_right_lane() + + wp_right = waypoint.get_right_lane() + w_rotation = waypoint.transform.rotation + while wp_right and wp_right.lane_type == carla.LaneType.Driving: + camera_transform = pylot.utils.Transform.from_carla_transform( + wp_right.transform) + lanes.append(self.get_lane(camera_transform.location, lane_id=wp_right.lane_id)) + if w_rotation == wp_right.transform.rotation: + wp_right = wp_right.get_right_lane() + else: + wp_right = wp_left.get_right_lane() + return lanes + def compute_waypoints(self, source_loc: Location, destination_loc: Location): """Computes waypoints between two locations. 
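A note on the get_all_lanes() traversal added in this patch: when a neighbouring lane runs in the opposite direction, CARLA's get_left_lane()/get_right_lane() are mirrored relative to the ego lane (the opposing lane's "left" points back at the ego lane), so the walk compares each neighbour's rotation against the ego waypoint's rotation and flips which call it uses to keep moving outwards. A minimal stand-alone sketch of that idea against the raw CARLA waypoint API (the helper name adjacent_driving_waypoints is illustrative, not part of the patch):

import carla

def adjacent_driving_waypoints(waypoint):
    """Collect one waypoint per adjacent driving lane, walking left, then right."""
    found = []
    ref_rotation = waypoint.transform.rotation
    wp = waypoint.get_left_lane()
    while wp and wp.lane_type == carla.LaneType.Driving:
        found.append(wp)
        # An opposing lane mirrors left/right, so switch calls to keep moving outwards.
        if wp.transform.rotation == ref_rotation:
            wp = wp.get_left_lane()
        else:
            wp = wp.get_right_lane()
    wp = waypoint.get_right_lane()
    while wp and wp.lane_type == carla.LaneType.Driving:
        found.append(wp)
        if wp.transform.rotation == ref_rotation:
            wp = wp.get_right_lane()
        else:
            wp = wp.get_left_lane()
    return found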
diff --git a/pylot/simulation/perfect_lane_detector_operator.py b/pylot/simulation/perfect_lane_detector_operator.py index b159a4fe4..b945e004f 100644 --- a/pylot/simulation/perfect_lane_detector_operator.py +++ b/pylot/simulation/perfect_lane_detector_operator.py @@ -78,8 +78,9 @@ def on_position_update(self, pose_msg: Message, pose_msg.timestamp)) vehicle_location = pose_msg.data.transform.location if self._map: - lanes = [self._map.get_lane(vehicle_location)] - lanes[0].draw_on_world(self._world) + lanes = self._map.get_all_lanes(vehicle_location) + for lane in lanes: + lane.draw_on_world(self._world) else: self._logger.debug('@{}: map is not ready yet'.format( pose_msg.timestamp)) From e6edf812b4c52e7ae829e79e18aa14788ec89a9d Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Fri, 18 Sep 2020 23:51:43 -0700 Subject: [PATCH 04/34] lint --- pylot/map/hd_map.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index 78885b101..f2b0dd69a 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -341,7 +341,9 @@ def get_lane(self, location, lane_id=0, waypoint_precision=0.05): return Lane(lane_id, left_markings, right_markings) def get_left_lane(self, location): - waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) + waypoint = self._get_waypoint(location, + project_to_road=False, + lane_type=carla.LaneType.Any) if waypoint: left_lane_waypoint = waypoint.get_left_lane() if left_lane_waypoint: @@ -350,7 +352,9 @@ def get_left_lane(self, location): return None def get_right_lane(self, location): - waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) + waypoint = self._get_waypoint(location, + project_to_road=False, + lane_type=carla.LaneType.Any) if waypoint: right_lane_waypoint = waypoint.get_right_lane() if right_lane_waypoint: From 1680e01b8337b8dc67a248285180e85c476058da Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Fri, 18 Sep 2020 23:55:17 -0700 Subject: [PATCH 05/34] more lint --- pylot/map/hd_map.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index f2b0dd69a..db259e584 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -370,20 +370,23 @@ def get_all_lanes(self, location): wp_left = waypoint.get_left_lane() w_rotation = waypoint.transform.rotation while wp_left and wp_left.lane_type == carla.LaneType.Driving: - camera_transform = pylot.utils.Transform.from_carla_transform( + camera_transform_l = pylot.utils.Transform.from_carla_transform( wp_left.transform) - lanes.append(self.get_lane(camera_transform.location, lane_id=wp_left.lane_id)) + lanes.append( + self.get_lane(camera_transform_l.location, + lane_id=wp_left.lane_id)) if w_rotation == wp_left.transform.rotation: wp_left = wp_left.get_left_lane() else: wp_left = wp_left.get_right_lane() wp_right = waypoint.get_right_lane() - w_rotation = waypoint.transform.rotation while wp_right and wp_right.lane_type == carla.LaneType.Driving: - camera_transform = pylot.utils.Transform.from_carla_transform( + camera_transform_r = pylot.utils.Transform.from_carla_transform( wp_right.transform) - lanes.append(self.get_lane(camera_transform.location, lane_id=wp_right.lane_id)) + lanes.append( + self.get_lane(camera_transform_r.location, + lane_id=wp_right.lane_id)) if w_rotation == wp_right.transform.rotation: wp_right = wp_right.get_right_lane() else: From d4da9f519906acabed507642ebe28135c48b6acb Mon Sep 17 
00:00:00 2001 From: ericleong17 Date: Fri, 18 Sep 2020 23:58:29 -0700 Subject: [PATCH 06/34] ... --- pylot/map/hd_map.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index db259e584..2be4b3dd4 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -365,7 +365,9 @@ def get_right_lane(self, location): def get_all_lanes(self, location): lanes = [self.get_lane(location)] - waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) + waypoint = self._get_waypoint(location, + project_to_road=False, + lane_type=carla.LaneType.Any) if waypoint: wp_left = waypoint.get_left_lane() w_rotation = waypoint.transform.rotation @@ -387,7 +389,7 @@ def get_all_lanes(self, location): lanes.append( self.get_lane(camera_transform_r.location, lane_id=wp_right.lane_id)) - if w_rotation == wp_right.transform.rotation: + if w_rotation == wp_right.transform.rotation: wp_right = wp_right.get_right_lane() else: wp_right = wp_left.get_right_lane() From 5fc8be1797dea1e2718fac9741394ce7a76a409b Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Sat, 19 Sep 2020 00:07:51 -0700 Subject: [PATCH 07/34] more linting --- pylot/map/hd_map.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index 2be4b3dd4..a9736785a 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -347,7 +347,7 @@ def get_left_lane(self, location): if waypoint: left_lane_waypoint = waypoint.get_left_lane() if left_lane_waypoint: - return pylot.utils.Transform.from_carla_transform( + return Transform.from_carla_transform( left_lane_waypoint.transform) return None @@ -358,7 +358,7 @@ def get_right_lane(self, location): if waypoint: right_lane_waypoint = waypoint.get_right_lane() if right_lane_waypoint: - return pylot.utils.Transform.from_carla_transform( + return Transform.from_carla_transform( right_lane_waypoint.transform) return None @@ -372,10 +372,10 @@ def get_all_lanes(self, location): wp_left = waypoint.get_left_lane() w_rotation = waypoint.transform.rotation while wp_left and wp_left.lane_type == carla.LaneType.Driving: - camera_transform_l = pylot.utils.Transform.from_carla_transform( + camera_transform = Transform.from_carla_transform( wp_left.transform) lanes.append( - self.get_lane(camera_transform_l.location, + self.get_lane(camera_transform.location, lane_id=wp_left.lane_id)) if w_rotation == wp_left.transform.rotation: wp_left = wp_left.get_left_lane() @@ -384,10 +384,10 @@ def get_all_lanes(self, location): wp_right = waypoint.get_right_lane() while wp_right and wp_right.lane_type == carla.LaneType.Driving: - camera_transform_r = pylot.utils.Transform.from_carla_transform( + camera_transform = Transform.from_carla_transform( wp_right.transform) lanes.append( - self.get_lane(camera_transform_r.location, + self.get_lane(camera_transform.location, lane_id=wp_right.lane_id)) if w_rotation == wp_right.transform.rotation: wp_right = wp_right.get_right_lane() From e7fe4e518ee5200691f69d92b5da586d3270781e Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Sat, 19 Sep 2020 13:32:41 -0700 Subject: [PATCH 08/34] pr comments --- pylot/map/hd_map.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index a9736785a..c6d67d314 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -317,7 +317,7 @@ def _must_obey_american_traffic_light(self, ego_transform: Transform, else: 
return (False, None) - def get_lane(self, location, lane_id=0, waypoint_precision=0.05): + def get_lane(self, location: Location, waypoint_precision: float = 0.05, lane_id: int = 0): lane_waypoints = [] next_wp = [ self._get_waypoint(location, @@ -340,7 +340,7 @@ def get_lane(self, location, lane_id=0, waypoint_precision=0.05): ] return Lane(lane_id, left_markings, right_markings) - def get_left_lane(self, location): + def get_left_lane(self, location: Location): waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) @@ -351,7 +351,7 @@ def get_left_lane(self, location): left_lane_waypoint.transform) return None - def get_right_lane(self, location): + def get_right_lane(self, location: Location): waypoint = self._get_waypoint(location, project_to_road=False, lane_type=carla.LaneType.Any) @@ -362,7 +362,7 @@ def get_right_lane(self, location): right_lane_waypoint.transform) return None - def get_all_lanes(self, location): + def get_all_lanes(self, location: Location): lanes = [self.get_lane(location)] waypoint = self._get_waypoint(location, @@ -372,11 +372,15 @@ def get_all_lanes(self, location): wp_left = waypoint.get_left_lane() w_rotation = waypoint.transform.rotation while wp_left and wp_left.lane_type == carla.LaneType.Driving: - camera_transform = Transform.from_carla_transform( - wp_left.transform) + left_location = Location.from_carla_location( + wp_left.transform.location) lanes.append( - self.get_lane(camera_transform.location, + self.get_lane(left_location, lane_id=wp_left.lane_id)) + + # If left lane is facing the opposite direction, its left + # lane would point back to the current lane, so we select + # its right lane to get the left lane relative to current. if w_rotation == wp_left.transform.rotation: wp_left = wp_left.get_left_lane() else: @@ -384,15 +388,19 @@ def get_all_lanes(self, location): wp_right = waypoint.get_right_lane() while wp_right and wp_right.lane_type == carla.LaneType.Driving: - camera_transform = Transform.from_carla_transform( - wp_right.transform) + right_location = Location.from_carla_location( + wp_right.transform.location) lanes.append( - self.get_lane(camera_transform.location, + self.get_lane(right_location, lane_id=wp_right.lane_id)) + + # Same logic as above. If right lane of current is in + # opposite direction, move rightwards by selecting it's + # left lane. 
if w_rotation == wp_right.transform.rotation: wp_right = wp_right.get_right_lane() else: - wp_right = wp_left.get_right_lane() + wp_right = wp_left.get_left_lane() return lanes def compute_waypoints(self, source_loc: Location, From 021e20d22aa42ab38acd3397473f952002c6faba Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Sat, 19 Sep 2020 13:37:12 -0700 Subject: [PATCH 09/34] lint --- pylot/map/hd_map.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index c6d67d314..5916e5713 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -317,7 +317,10 @@ def _must_obey_american_traffic_light(self, ego_transform: Transform, else: return (False, None) - def get_lane(self, location: Location, waypoint_precision: float = 0.05, lane_id: int = 0): + def get_lane(self, + location: Location, + waypoint_precision: float = 0.05, + lane_id: int = 0): lane_waypoints = [] next_wp = [ self._get_waypoint(location, @@ -375,8 +378,7 @@ def get_all_lanes(self, location: Location): left_location = Location.from_carla_location( wp_left.transform.location) lanes.append( - self.get_lane(left_location, - lane_id=wp_left.lane_id)) + self.get_lane(left_location, lane_id=wp_left.lane_id)) # If left lane is facing the opposite direction, its left # lane would point back to the current lane, so we select @@ -391,8 +393,7 @@ def get_all_lanes(self, location: Location): right_location = Location.from_carla_location( wp_right.transform.location) lanes.append( - self.get_lane(right_location, - lane_id=wp_right.lane_id)) + self.get_lane(right_location, lane_id=wp_right.lane_id)) # Same logic as above. If right lane of current is in # opposite direction, move rightwards by selecting it's From c54a88d7ce83a1b915311927e05cef8a7dfaeff1 Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Sat, 19 Sep 2020 13:39:18 -0700 Subject: [PATCH 10/34] lint --- pylot/map/hd_map.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index 5916e5713..bed67fca0 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -395,7 +395,7 @@ def get_all_lanes(self, location: Location): lanes.append( self.get_lane(right_location, lane_id=wp_right.lane_id)) - # Same logic as above. If right lane of current is in + # Same logic as above. If right lane of current is in # opposite direction, move rightwards by selecting it's # left lane. if w_rotation == wp_right.transform.rotation: From 5a2a55043b4fae30234283e43113c613c7cd5d7f Mon Sep 17 00:00:00 2001 From: ericleong17 Date: Tue, 22 Sep 2020 12:59:27 -0700 Subject: [PATCH 11/34] fix to get all lane --- pylot/flags.py | 2 ++ pylot/map/hd_map.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/pylot/flags.py b/pylot/flags.py index 9a4ad9f1d..db4cd499c 100644 --- a/pylot/flags.py +++ b/pylot/flags.py @@ -154,6 +154,8 @@ 'Enable recording of bbox annotated detector images') flags.DEFINE_bool('log_traffic_light_detector_output', False, 'Enable recording of bbox annotated tl detector images') +flags.DEFINE_bool('log_perfect_lane_detection', False, + 'Enable recording of lanes from perfect lane detection') ######################################## # Evaluation operators. 
diff --git a/pylot/map/hd_map.py b/pylot/map/hd_map.py index bed67fca0..7b75234f6 100644 --- a/pylot/map/hd_map.py +++ b/pylot/map/hd_map.py @@ -322,6 +322,18 @@ def get_lane(self, waypoint_precision: float = 0.05, lane_id: int = 0): lane_waypoints = [] + # Consider waypoints in opposite direction of camera so we can get + # lane data for adjacent lanes in opposing directions. + previous_wp = [ + self._get_waypoint(location, + project_to_road=False, + lane_type=carla.LaneType.Any) + ] + + while len(previous_wp) == 1: + lane_waypoints.append(previous_wp[0]) + previous_wp = previous_wp[0].previous(waypoint_precision) + next_wp = [ self._get_waypoint(location, project_to_road=False, From f4ad20feed9b0a1e5d28c477c736bb888571d6bd Mon Sep 17 00:00:00 2001 From: mageofboy Date: Tue, 17 Nov 2020 21:47:02 +0000 Subject: [PATCH 12/34] updates --- pylot/simulation/carla_operator.py | 3 +-- pylot/simulation/perfect_lane_detector_operator.py | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pylot/simulation/carla_operator.py b/pylot/simulation/carla_operator.py index 5df93e96b..e050f13a1 100644 --- a/pylot/simulation/carla_operator.py +++ b/pylot/simulation/carla_operator.py @@ -85,8 +85,7 @@ def __init__( self._spectator = self._world.get_spectator() if not (self._simulator_version.startswith('0.8') - or re.match('0.9.[02-7]', self._simulator_version) is not None - or self._simulator_version == '0.9.1'): + or re.match('0.9.[1-9]$', self._simulator_version) is not None): # Any simulator version after 0.9.7. # Create a traffic manager to that auto pilot works. self._traffic_manager = self._client.get_trafficmanager( diff --git a/pylot/simulation/perfect_lane_detector_operator.py b/pylot/simulation/perfect_lane_detector_operator.py index d1d166750..d1941221e 100644 --- a/pylot/simulation/perfect_lane_detector_operator.py +++ b/pylot/simulation/perfect_lane_detector_operator.py @@ -124,3 +124,4 @@ def on_position_update(self, timestamp: Timestamp, lanes = [] output_msg = LanesMessage(pose_msg.timestamp, lanes) detected_lane_stream.send(output_msg) + detected_lane_stream.send(erdos.WatermarkMessage(pose_msg.timestamp)) From 25d4b7d13ca24e47bea8222f8bb1f147d8405608 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Tue, 17 Nov 2020 21:58:36 +0000 Subject: [PATCH 13/34] lint --- pylot/simulation/carla_operator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pylot/simulation/carla_operator.py b/pylot/simulation/carla_operator.py index e050f13a1..fdb48d5c3 100644 --- a/pylot/simulation/carla_operator.py +++ b/pylot/simulation/carla_operator.py @@ -84,8 +84,8 @@ def __init__( # handle (which is slow). self._spectator = self._world.get_spectator() - if not (self._simulator_version.startswith('0.8') - or re.match('0.9.[1-9]$', self._simulator_version) is not None): + if not (self._simulator_version.startswith('0.8') or re.match( + '0.9.[1-9]$', self._simulator_version) is not None): # Any simulator version after 0.9.7. # Create a traffic manager to that auto pilot works. 
self._traffic_manager = self._client.get_trafficmanager( From f226257c6bd90f47e461eac8d640c3ed80233dfb Mon Sep 17 00:00:00 2001 From: mageofboy Date: Mon, 23 Nov 2020 22:38:59 +0000 Subject: [PATCH 14/34] update data process --- scripts/lanenet_data_process.py | 47 ++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/scripts/lanenet_data_process.py b/scripts/lanenet_data_process.py index c9fc1861a..b54c42929 100644 --- a/scripts/lanenet_data_process.py +++ b/scripts/lanenet_data_process.py @@ -4,11 +4,12 @@ import re FLAGS = flags.FLAGS -flags.DEFINE_string('data_path', 'data/', 'Path where data is logged') +flags.DEFINE_string('data_path', 'data/', 'Main path where data is logged, not including town and start') flags.DEFINE_string('output_path', 'data/lanenet/', 'Path where to place lanenet structured data') - +flags.DEFINE_string('town_start', '', 'Town and start where images were collected from') def main(argv): - data_dir = os.path.abspath(FLAGS.data_path) + data_path = os.path.join(FLAGS.data_path, FLAGS.town_start) + data_dir = os.path.abspath(data_path) output_dir = os.path.abspath(FLAGS.output_path) binary_img_dir = os.path.join(output_dir, "gt_binary_image") instance_img_dir = os.path.join(output_dir, "gt_instance_image") @@ -17,14 +18,42 @@ def main(argv): Path(binary_img_dir).mkdir(parents=True, exist_ok=True) Path(instance_img_dir).mkdir(parents=True, exist_ok=True) Path(img_dir).mkdir(parents=True, exist_ok=True) + group_files = {} + rename = {} + i, j, k = 0, 0, 0 for f in os.listdir(data_dir): curr_path = os.path.join(data_dir, f) - if re.search(f"binary-lane-.+\.png", f): - Path(curr_path).rename(os.path.join(binary_img_dir, f)) - elif re.search(f"lane-.+\.png", f): - Path(curr_path).rename(os.path.join(instance_img_dir, f)) - elif re.search(f"carla-center-.+\.png", f): - Path(curr_path).rename(os.path.join(img_dir, f)) + m = re.search(r"-(\d+)\.", f) + timestamp = m.group(1) + new_f = FLAGS.town_start + "_" + timestamp + ".png" if FLAGS.town_start else timestamp +".png" + if re.search(r"binary-lane-.+\.png", f): + new_fname = os.path.join(binary_img_dir, new_f) + i+=1 + elif re.search(r"lane-.+\.png", f): + new_fname = os.path.join(instance_img_dir, new_f) + j+=1 + elif re.search(r"center-.+\.png", f): + new_fname = os.path.join(img_dir, new_f) + k+=1 + rename[new_fname] = curr_path + if timestamp in group_files: + group_files[timestamp].append(new_fname) + else: + group_files[timestamp] = [new_fname] + print(f"{i} binary {j} lane {k} center") + with open(os.path.join(output_dir,"im_names.txt"), "a") as f: + i = 0 + for v in group_files.values(): + if len(v) == 3: + v_sort = sorted(v) #binary, center, lane + v_join = [v_sort[1], v_sort[0], v_sort[2]] + f.write(" ".join(v_join) +'\n') + for new_fname in v: + curr_path = rename[new_fname] + Path(curr_path).rename(new_fname) + i+=1 + print(i) + if __name__ == "__main__": app.run(main) \ No newline at end of file From ee9b5326936e28120f16955107772bce1fe7cf54 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 12 Dec 2020 11:02:10 +0000 Subject: [PATCH 15/34] fixed data generation --- pylot/perception/detection/lane.py | 46 ++++++++++++++++++- .../perfect_lane_detector_operator.py | 34 ++++++++------ scripts/lanenet_data_process.py | 4 +- 3 files changed, 68 insertions(+), 16 deletions(-) diff --git a/pylot/perception/detection/lane.py b/pylot/perception/detection/lane.py index 92840e979..de360c8f7 100644 --- a/pylot/perception/detection/lane.py +++ b/pylot/perception/detection/lane.py 
@@ -1,7 +1,7 @@ from collections import deque import numpy as np from pylot.utils import Location, Rotation, Transform, Vector3D - +import cv2 from shapely.geometry import Point from shapely.geometry.polygon import Polygon @@ -71,6 +71,50 @@ def draw_on_frame(self, frame, inverse_transform=None, binary_frame=None): except Exception: continue + def draw_on_frame_data(self, frame, binary_frame, camera_setup, inverse_transform=None): + """Draw lane markings on a frame. + + Args: + bgr_frame: Frame on which to draw the waypoints. + inverse_transform (optional): To be used to transform the waypoints + to relative to the ego vehicle. + """ + extrinsic_matrix = camera_setup.get_extrinsic_matrix() + intrinsic_matrix = camera_setup.get_intrinsic_matrix() + # change color based on lane id + gray_color_map = [(20, 20), (70, 70), (120, 120), (170, 170), (220, 220), (250, 250)] + lane_color_l = gray_color_map[self.id % len(gray_color_map)] + lane_color_r = gray_color_map[(self.id + 2) % len(gray_color_map)] + + for marking in self.left_markings: + if inverse_transform: + # marking = inverse_transform * marking + marking = inverse_transform.transform_points( + np.array([marking.as_numpy_array()])) + marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2]) + pixel_location = marking.to_camera_view(extrinsic_matrix, + intrinsic_matrix) + if (pixel_location.z >= 0): + try: + cv2.circle(frame, (int(pixel_location.x), int(pixel_location.y)), 3, lane_color_l, -1) + cv2.circle(binary_frame, (int(pixel_location.x), int(pixel_location.y)), 3, (255, 255), -1) + except Exception: + continue + for marking in self.right_markings: + if inverse_transform: + # marking = inverse_transform * marking + marking = inverse_transform.transform_points( + np.array([marking.as_numpy_array()])) + marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2]) + pixel_location = marking.to_camera_view(extrinsic_matrix, + intrinsic_matrix) + if (pixel_location.z >= 0): + try: + cv2.circle(frame, (int(pixel_location.x), int(pixel_location.y)), 3, lane_color_r, -1) + cv2.circle(binary_frame, (int(pixel_location.x), int(pixel_location.y)), 3, (255, 255), -1) + except Exception: + continue + def draw_on_world(self, world): from carla import Color for marking in self.left_markings: diff --git a/pylot/simulation/perfect_lane_detector_operator.py b/pylot/simulation/perfect_lane_detector_operator.py index d1941221e..fd7348f66 100644 --- a/pylot/simulation/perfect_lane_detector_operator.py +++ b/pylot/simulation/perfect_lane_detector_operator.py @@ -1,6 +1,9 @@ from collections import deque import numpy as np import erdos +import PIL.Image as Image +import cv2 +import os from erdos import Message, ReadStream, Timestamp, WriteStream from pylot.perception.messages import LanesMessage @@ -32,6 +35,8 @@ def __init__(self, pose_stream: ReadStream, open_drive_stream: ReadStream, self._flags = flags self._logger = erdos.utils.setup_logging(self.config.name, self.config.log_file_name) + self._logger_lane = erdos.utils.setup_logging(self.config.name+"_lane", + self.config.log_file_name + "_lane") self._bgr_msgs = deque() self._pose_msgs = deque() self._frame_cnt = 0 @@ -92,29 +97,32 @@ def on_position_update(self, timestamp: Timestamp, bgr_msg = self._bgr_msgs.popleft() pose_msg = self._pose_msgs.popleft() vehicle_location = pose_msg.data.transform.location + self._frame_cnt += 1 if self._map: lanes = self._map.get_all_lanes(vehicle_location) if self._flags.log_lane_detection_camera: camera_setup = bgr_msg.frame.camera_setup - black_img = 
np.zeros( - (camera_setup.height, camera_setup.width, 3), + frame = np.zeros( + (camera_setup.height, camera_setup.width), dtype=np.dtype("uint8")) - frame = CameraFrame(black_img, 'BGR', camera_setup) - binary_frame = CameraFrame(black_img.copy(), 'BGR', - camera_setup) + binary_frame = frame.copy() for lane in lanes: - lane.draw_on_frame(frame, + lane.draw_on_frame_data(frame, binary_frame, camera_setup, inverse_transform=pose_msg.data. - transform.inverse_transform(), - binary_frame=binary_frame) + transform.inverse_transform()) self._logger.debug('@{}: detected {} lanes'.format( bgr_msg.timestamp, len(lanes))) - self._frame_cnt += 1 if self._frame_cnt % self._flags.log_every_nth_message == 0: - frame.save(bgr_msg.timestamp.coordinates[0], - self._flags.data_path, "lane") - binary_frame.save(bgr_msg.timestamp.coordinates[0], - self._flags.data_path, "binary-lane") + instance_file_name = os.path.join(self._flags.data_path, + '{}-{}.png'.format("lane-", bgr_msg.timestamp.coordinates[0])) + binary_file_name = os.path.join(self._flags.data_path, + '{}-{}.png'.format("binary_lane-", bgr_msg.timestamp.coordinates[0])) + instance_img = Image.fromarray(frame) + binary_img = Image.fromarray(binary_frame) + instance_img.save(instance_file_name) + binary_img.save(binary_file_name) + self._logger_lane.debug('@{}: Created binary lane and lane images in {}'.format( + pose_msg.timestamp, self._flags.data_path)) else: for lane in lanes: lane.draw_on_world(self._world) diff --git a/scripts/lanenet_data_process.py b/scripts/lanenet_data_process.py index b54c42929..20d35bfeb 100644 --- a/scripts/lanenet_data_process.py +++ b/scripts/lanenet_data_process.py @@ -26,7 +26,7 @@ def main(argv): m = re.search(r"-(\d+)\.", f) timestamp = m.group(1) new_f = FLAGS.town_start + "_" + timestamp + ".png" if FLAGS.town_start else timestamp +".png" - if re.search(r"binary-lane-.+\.png", f): + if re.search(r"binary_lane-.+\.png", f): new_fname = os.path.join(binary_img_dir, new_f) i+=1 elif re.search(r"lane-.+\.png", f): @@ -52,7 +52,7 @@ def main(argv): curr_path = rename[new_fname] Path(curr_path).rename(new_fname) i+=1 - print(i) + print(f"{i} data points ") if __name__ == "__main__": From 503dc42932034b5f5718a6d8acd949cc31cc753d Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 12 Dec 2020 03:20:35 -0800 Subject: [PATCH 16/34] linting --- pylot/perception/detection/lane.py | 31 ++++++++++++----- .../perfect_lane_detector_operator.py | 34 +++++++++++-------- 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/pylot/perception/detection/lane.py b/pylot/perception/detection/lane.py index de360c8f7..d4e3a72c6 100644 --- a/pylot/perception/detection/lane.py +++ b/pylot/perception/detection/lane.py @@ -71,18 +71,25 @@ def draw_on_frame(self, frame, inverse_transform=None, binary_frame=None): except Exception: continue - def draw_on_frame_data(self, frame, binary_frame, camera_setup, inverse_transform=None): - """Draw lane markings on a frame. + def collect_frame_data(self, + frame, + binary_frame, + camera_setup, + inverse_transform=None): + """Draw lane markings on input frames for lane data collection. Args: - bgr_frame: Frame on which to draw the waypoints. + frame: Grayscale frame on which to draw the waypoints. + binary_frame: Grayscale frame on which to draw the waypoints in white. + camera_setup: Camera setup used to generate the frame. inverse_transform (optional): To be used to transform the waypoints to relative to the ego vehicle. 
""" extrinsic_matrix = camera_setup.get_extrinsic_matrix() intrinsic_matrix = camera_setup.get_intrinsic_matrix() # change color based on lane id - gray_color_map = [(20, 20), (70, 70), (120, 120), (170, 170), (220, 220), (250, 250)] + gray_color_map = [(20, 20), (70, 70), (120, 120), (170, 170), + (220, 220), (250, 250)] lane_color_l = gray_color_map[self.id % len(gray_color_map)] lane_color_r = gray_color_map[(self.id + 2) % len(gray_color_map)] @@ -96,8 +103,12 @@ def draw_on_frame_data(self, frame, binary_frame, camera_setup, inverse_transfor intrinsic_matrix) if (pixel_location.z >= 0): try: - cv2.circle(frame, (int(pixel_location.x), int(pixel_location.y)), 3, lane_color_l, -1) - cv2.circle(binary_frame, (int(pixel_location.x), int(pixel_location.y)), 3, (255, 255), -1) + cv2.circle(frame, + (int(pixel_location.x), int(pixel_location.y)), + 3, lane_color_l, -1) + cv2.circle(binary_frame, + (int(pixel_location.x), int(pixel_location.y)), + 3, (255, 255), -1) except Exception: continue for marking in self.right_markings: @@ -110,8 +121,12 @@ def draw_on_frame_data(self, frame, binary_frame, camera_setup, inverse_transfor intrinsic_matrix) if (pixel_location.z >= 0): try: - cv2.circle(frame, (int(pixel_location.x), int(pixel_location.y)), 3, lane_color_r, -1) - cv2.circle(binary_frame, (int(pixel_location.x), int(pixel_location.y)), 3, (255, 255), -1) + cv2.circle(frame, + (int(pixel_location.x), int(pixel_location.y)), + 3, lane_color_r, -1) + cv2.circle(binary_frame, + (int(pixel_location.x), int(pixel_location.y)), + 3, (255, 255), -1) except Exception: continue diff --git a/pylot/simulation/perfect_lane_detector_operator.py b/pylot/simulation/perfect_lane_detector_operator.py index fd7348f66..b3b61fd7e 100644 --- a/pylot/simulation/perfect_lane_detector_operator.py +++ b/pylot/simulation/perfect_lane_detector_operator.py @@ -35,8 +35,8 @@ def __init__(self, pose_stream: ReadStream, open_drive_stream: ReadStream, self._flags = flags self._logger = erdos.utils.setup_logging(self.config.name, self.config.log_file_name) - self._logger_lane = erdos.utils.setup_logging(self.config.name+"_lane", - self.config.log_file_name + "_lane") + self._logger_lane = erdos.utils.setup_logging( + self.config.name + "_lane", self.config.log_file_name + "_lane") self._bgr_msgs = deque() self._pose_msgs = deque() self._frame_cnt = 0 @@ -102,27 +102,33 @@ def on_position_update(self, timestamp: Timestamp, lanes = self._map.get_all_lanes(vehicle_location) if self._flags.log_lane_detection_camera: camera_setup = bgr_msg.frame.camera_setup - frame = np.zeros( - (camera_setup.height, camera_setup.width), - dtype=np.dtype("uint8")) + frame = np.zeros((camera_setup.height, camera_setup.width), + dtype=np.dtype("uint8")) binary_frame = frame.copy() for lane in lanes: - lane.draw_on_frame_data(frame, binary_frame, camera_setup, - inverse_transform=pose_msg.data. - transform.inverse_transform()) + lane.collect_frame_data(frame, + binary_frame, + camera_setup, + inverse_transform=pose_msg.data. 
+ transform.inverse_transform()) self._logger.debug('@{}: detected {} lanes'.format( bgr_msg.timestamp, len(lanes))) if self._frame_cnt % self._flags.log_every_nth_message == 0: - instance_file_name = os.path.join(self._flags.data_path, - '{}-{}.png'.format("lane-", bgr_msg.timestamp.coordinates[0])) - binary_file_name = os.path.join(self._flags.data_path, - '{}-{}.png'.format("binary_lane-", bgr_msg.timestamp.coordinates[0])) + instance_file_name = os.path.join( + self._flags.data_path, + '{}-{}.png'.format("lane-", + bgr_msg.timestamp.coordinates[0])) + binary_file_name = os.path.join( + self._flags.data_path, + '{}-{}.png'.format("binary_lane-", + bgr_msg.timestamp.coordinates[0])) instance_img = Image.fromarray(frame) binary_img = Image.fromarray(binary_frame) instance_img.save(instance_file_name) binary_img.save(binary_file_name) - self._logger_lane.debug('@{}: Created binary lane and lane images in {}'.format( - pose_msg.timestamp, self._flags.data_path)) + self._logger_lane.debug( + '@{}: Created binary lane and lane images in {}'. + format(pose_msg.timestamp, self._flags.data_path)) else: for lane in lanes: lane.draw_on_world(self._world) From b235b9563ab17a868c8a1ab53dc8e55fe59171f7 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 12 Dec 2020 03:36:05 -0800 Subject: [PATCH 17/34] lint --- pylot/perception/detection/lane.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pylot/perception/detection/lane.py b/pylot/perception/detection/lane.py index d4e3a72c6..865da587c 100644 --- a/pylot/perception/detection/lane.py +++ b/pylot/perception/detection/lane.py @@ -79,17 +79,17 @@ def collect_frame_data(self, """Draw lane markings on input frames for lane data collection. Args: - frame: Grayscale frame on which to draw the waypoints. - binary_frame: Grayscale frame on which to draw the waypoints in white. + frame: Grayscale image on which to draw the waypoints. + binary_frame: Black and white image on which to draw the waypoints. camera_setup: Camera setup used to generate the frame. inverse_transform (optional): To be used to transform the waypoints to relative to the ego vehicle. 
""" extrinsic_matrix = camera_setup.get_extrinsic_matrix() intrinsic_matrix = camera_setup.get_intrinsic_matrix() - # change color based on lane id gray_color_map = [(20, 20), (70, 70), (120, 120), (170, 170), (220, 220), (250, 250)] + # change color based on lane id lane_color_l = gray_color_map[self.id % len(gray_color_map)] lane_color_r = gray_color_map[(self.id + 2) % len(gray_color_map)] From 7b9624fed6d7d20776465c1ceb34378d272c96f3 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 12 Dec 2020 03:38:44 -0800 Subject: [PATCH 18/34] lint --- pylot/simulation/perfect_lane_detector_operator.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pylot/simulation/perfect_lane_detector_operator.py b/pylot/simulation/perfect_lane_detector_operator.py index b3b61fd7e..db6ddfc6e 100644 --- a/pylot/simulation/perfect_lane_detector_operator.py +++ b/pylot/simulation/perfect_lane_detector_operator.py @@ -2,12 +2,10 @@ import numpy as np import erdos import PIL.Image as Image -import cv2 import os from erdos import Message, ReadStream, Timestamp, WriteStream from pylot.perception.messages import LanesMessage -from pylot.perception.camera_frame import CameraFrame class PerfectLaneDetectionOperator(erdos.Operator): From cba1d1a145753284b0fe9a6eaebba8858f0b0a3b Mon Sep 17 00:00:00 2001 From: mageofboy Date: Fri, 16 Apr 2021 17:11:35 +0000 Subject: [PATCH 19/34] start --- install.sh | 9 +++ pylot/component_creator.py | 5 ++ pylot/flags.py | 2 +- pylot/operator_creator.py | 15 ++++ pylot/perception/flags.py | 6 ++ .../perception/tracking/qd_track_operator.py | 76 +++++++++++++++++++ requirements.txt | 2 + 7 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 pylot/perception/tracking/qd_track_operator.py diff --git a/install.sh b/install.sh index 2e20369e1..ebe72c976 100755 --- a/install.sh +++ b/install.sh @@ -141,6 +141,15 @@ sudo apt-get install llvm-9 export LLVM_CONFIG=/usr/bin/llvm-config-9 python3 setup.py build develop --user +###### Install CenterTrack ###### +cd $PYLOT_HOME/dependencies/ +git clone https://github.com/SysCV/qdtrack #FIX to use fork? +git clone https://github.com/open-mmlab/mmdetection.git +cd mmdetection +python3 setup.py develop +cd $PYLOT_HOME/dependencies/qdtrack +python3 setup.py develop + ##### Download the Lanenet code ##### echo "[x] Cloning the lanenet lane detection code..." 
cd $PYLOT_HOME/dependencies/ diff --git a/pylot/component_creator.py b/pylot/component_creator.py index 9612d7987..99e2bb4e8 100644 --- a/pylot/component_creator.py +++ b/pylot/component_creator.py @@ -351,6 +351,11 @@ def add_obstacle_tracking(center_camera_stream, obstacles_wo_history_tracking_stream = \ pylot.operator_creator.add_center_track_tracking( center_camera_stream, center_camera_setup) + elif FLAGS.tracker_type == 'qd_track': + logger.debug('Using QDTrack obstacle tracker...') + obstacles_wo_history_tracking_stream = \ + pylot.operator_creator.add_qd_track_tracking( + center_camera_stream, center_camera_setup) else: logger.debug('Using obstacle tracker...') obstacles_wo_history_tracking_stream = \ diff --git a/pylot/flags.py b/pylot/flags.py index 6b7f8a5ed..96a7357f1 100644 --- a/pylot/flags.py +++ b/pylot/flags.py @@ -40,7 +40,7 @@ flags.DEFINE_bool('perfect_obstacle_tracking', False, 'True to enable perfect obstacle tracking') flags.DEFINE_enum('tracker_type', 'sort', - ['da_siam_rpn', 'deep_sort', 'sort', 'center_track'], + ['da_siam_rpn', 'deep_sort', 'sort', 'center_track', 'qd_track'], 'Sets which obstacle tracker to use') flags.DEFINE_bool('lane_detection', False, 'True to enable lane detection') flags.DEFINE_bool('perfect_lane_detection', False, diff --git a/pylot/operator_creator.py b/pylot/operator_creator.py index 9b80bff27..fa7c9979b 100644 --- a/pylot/operator_creator.py +++ b/pylot/operator_creator.py @@ -269,6 +269,21 @@ def add_center_track_tracking(bgr_camera_stream, return obstacle_tracking_stream +def add_qd_track_tracking(bgr_camera_stream, + camera_setup, + name='qd_track'): + from pylot.perception.tracking.qd_track_operator import \ + QdTrackOperator + op_config = erdos.OperatorConfig(name='qd_track_operator', + log_file_name=FLAGS.log_file_name, + csv_log_file_name=FLAGS.csv_log_file_name, + profile_file_name=FLAGS.profile_file_name) + [obstacle_tracking_stream] = erdos.connect(QdTrackOperator, op_config, + [bgr_camera_stream], FLAGS, + camera_setup) + return obstacle_tracking_stream + + def add_tracking_evaluation(obstacle_tracking_stream, ground_obstacles_stream, evaluate_timely=False, diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index 4ed856922..93ad7215f 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -84,6 +84,12 @@ ['kitti_tracking', 'coco', 'mot', 'nuscenes'], 'CenterTrack available models') +# QDTrack tracking flags. +flags.DEFINE_string( + 'qd_track_model_path', + 'dependencies/models/tracking/CenterTrack/kitti_fulltrain.pth', #FIX + 'Path to the model') + # Lane detection flags. 
flags.DEFINE_float('lane_detection_gpu_memory_fraction', 0.3, 'GPU memory fraction allocated to Lanenet') diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py new file mode 100644 index 000000000..080690ac9 --- /dev/null +++ b/pylot/perception/tracking/qd_track_operator.py @@ -0,0 +1,76 @@ +import cv2 + +import erdos + +import numpy as np + +from pylot.perception.detection.obstacle import Obstacle +from pylot.perception.detection.utils import BoundingBox2D, \ + BoundingBox3D, OBSTACLE_LABELS +from pylot.perception.messages import ObstaclesMessage +from qdtrack.apis.inference import init_model, inference_model + +import torch + + +class QdTrackOperator(erdos.Operator): + def __init__(self, camera_stream, obstacle_tracking_stream, flags, + camera_setup): + camera_stream.add_callback(self.on_frame_msg, + [obstacle_tracking_stream]) + self._flags = flags + self._logger = erdos.utils.setup_logging(self.config.name, + self.config.log_file_name) + self._csv_logger = erdos.utils.setup_csv_logging( + self.config.name + '-csv', self.config.csv_log_file_name) + self._camera_setup = camera_setup + + self.model = init_model() #FIX + + + @staticmethod + def connect(camera_stream): + obstacle_tracking_stream = erdos.WriteStream() + return [obstacle_tracking_stream] + + def destroy(self): + self._logger.warn('destroying {}'.format(self.config.name)) + + @erdos.profile_method() + def on_frame_msg(self, msg, obstacle_tracking_stream): + """Invoked when a FrameMessage is received on the camera stream.""" + self._logger.debug('@{}: {} received frame'.format( + msg.timestamp, self.config.name)) + assert msg.frame.encoding == 'BGR', 'Expects BGR frames' + image_np = msg.frame.as_bgr_numpy_array() + results = self.inference_model(self.model, image_np) #FIX + obstacles = [] + for res in results: + track_id = res['tracking_id'] + bbox = res['bbox'] + score = res['score'] + (label_id, ) = res['class'] - 1, + if label_id > 80: + continue + label = self.trained_dataset.class_name[label_id] + if label in ['Pedestrian', 'pedestrian']: + label = 'person' + elif label == 'Car': + label = 'car' + elif label == 'Cyclist': + label == 'bicycle' + if label in OBSTACLE_LABELS: + bounding_box_2D = BoundingBox2D(bbox[0], bbox[2], bbox[1], + bbox[3]) + bounding_box_3D = None + if 'dim' in res and 'loc' in res and 'rot_y' in res: + bounding_box_3D = BoundingBox3D.from_dimensions( + res['dim'], res['loc'], res['rot_y']) + obstacles.append( + Obstacle(bounding_box_3D, + score, + label, + track_id, + bounding_box_2D=bounding_box_2D)) + obstacle_tracking_stream.send( + ObstaclesMessage(msg.timestamp, obstacles, 0)) diff --git a/requirements.txt b/requirements.txt index 6b3d1776d..4ca42c1a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,5 +27,7 @@ nuscenes-devkit progress pyquaternion scikit-learn==0.22.2 +mmcv>=0.3.0 +mmdet ##### CARLA dependencies ##### networkx==2.2 From eb503be387b223ede2d64aebee9a085ffaaeca38 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Fri, 23 Apr 2021 19:33:24 +0000 Subject: [PATCH 20/34] model working, need fix operator --- configs/tracking.conf | 4 ++-- install.sh | 11 ++++++--- pylot/perception/flags.py | 6 ++++- .../perception/tracking/qd_track_operator.py | 23 ++++++++++++++----- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/configs/tracking.conf b/configs/tracking.conf index 6fee38356..97ddd63e3 100644 --- a/configs/tracking.conf +++ b/configs/tracking.conf @@ -4,7 +4,7 @@ --obstacle_detection_gpu_memory_fraction=0.4 
######### Tracker config ######### --obstacle_tracking ---tracker_type=sort +--tracker_type=qd_track #--tracker_type=deep_sort #--tracker_type=da_siam_rpn ###### Control config ##### @@ -15,7 +15,7 @@ --simulator_num_people=200 --simulator_num_vehicles=10 ######### Other config ######### ---visualize_tracked_obstacles +# --visualize_tracked_obstacles --log_file_name=pylot.log --csv_log_file_name=pylot.csv --v=1 diff --git a/install.sh b/install.sh index ebe72c976..e9fbedb79 100755 --- a/install.sh +++ b/install.sh @@ -79,6 +79,11 @@ mkdir -p tracking/center_track ; cd tracking/center_track # COCO model ~/.local/bin/gdown https://drive.google.com/uc?id=1tJCEJmdtYIh8VuN8CClGNws3YO7QGd40 +###### Download QDTrack models ###### +cd $PYLOT_HOME/dependencies/models +mkdir -p tracking/qd_track ; cd tracking/qd_track +~/.local/bin/gdown https://drive.google.com/uc?id=1YNAQgd8rMqqEG-fRj3VWlO4G5kdwJbxz + ##### Download AnyNet depth estimation models ##### echo "[x] Downloading the depth estimation models..." cd $PYLOT_HOME/dependencies/models @@ -141,12 +146,12 @@ sudo apt-get install llvm-9 export LLVM_CONFIG=/usr/bin/llvm-config-9 python3 setup.py build develop --user -###### Install CenterTrack ###### +###### Install QDTrack ###### cd $PYLOT_HOME/dependencies/ -git clone https://github.com/SysCV/qdtrack #FIX to use fork? +git clone https://github.com/mageofboy/qdtrack.git git clone https://github.com/open-mmlab/mmdetection.git cd mmdetection -python3 setup.py develop +python3 setup.py develop #need to add mmcv cd $PYLOT_HOME/dependencies/qdtrack python3 setup.py develop diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index 93ad7215f..f0b5825bc 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -87,7 +87,11 @@ # QDTrack tracking flags. flags.DEFINE_string( 'qd_track_model_path', - 'dependencies/models/tracking/CenterTrack/kitti_fulltrain.pth', #FIX + 'dependencies/models/tracking/qd_track/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', #FIX + 'Path to the model') +flags.DEFINE_string( + 'qd_track_config_path', + 'dependencies/qdtrack/configs/qdtrack-frcnn_r50_fpn_12e_bdd100k.py', #FIX 'Path to the model') # Lane detection flags. 
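The qd_track_operator.py diff below still carries the old CenterTrack-style loop (res['tracking_id'], res['bbox'], ...), which does not match what inference_model returns: the operator's own bbox_result/track_result unpacking points to an mmdet-style result, i.e. one [x1, y1, x2, y2, score] array per class plus the tracking output. A hedged sketch of the detection half of that conversion into Pylot obstacles, assuming that per-class layout (class_names and the 0.5 score threshold are illustrative, not taken from the patch):

from pylot.perception.detection.obstacle import Obstacle
from pylot.perception.detection.utils import BoundingBox2D, OBSTACLE_LABELS

def bbox_result_to_obstacles(bbox_result, class_names, score_threshold=0.5):
    """Turn mmdet-style per-class [x1, y1, x2, y2, score] rows into Obstacles."""
    obstacles = []
    for class_id, rows in enumerate(bbox_result):
        label = class_names[class_id]
        if label not in OBSTACLE_LABELS:
            continue
        for x_min, y_min, x_max, y_max, score in rows:
            if score < score_threshold:
                continue
            bbox_2d = BoundingBox2D(int(x_min), int(x_max), int(y_min), int(y_max))
            # Track ids belong to track_result; -1 marks a detection without a track.
            obstacles.append(
                Obstacle(None, float(score), label, -1, bounding_box_2D=bbox_2d))
    return obstacles

The ids and labels of tracked boxes would then be filled in from track_result once its layout in the qdtrack fork is pinned down.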
diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index 080690ac9..bef027f7b 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -8,14 +8,14 @@ from pylot.perception.detection.utils import BoundingBox2D, \ BoundingBox3D, OBSTACLE_LABELS from pylot.perception.messages import ObstaclesMessage -from qdtrack.apis.inference import init_model, inference_model import torch - class QdTrackOperator(erdos.Operator): def __init__(self, camera_stream, obstacle_tracking_stream, flags, camera_setup): + from qdtrack.apis import init_model + camera_stream.add_callback(self.on_frame_msg, [obstacle_tracking_stream]) self._flags = flags @@ -24,9 +24,7 @@ def __init__(self, camera_stream, obstacle_tracking_stream, flags, self._csv_logger = erdos.utils.setup_csv_logging( self.config.name + '-csv', self.config.csv_log_file_name) self._camera_setup = camera_setup - - self.model = init_model() #FIX - + self.model = init_model(self._flags.qd_track_config_path, checkpoint=self._flags.qd_track_model_path, device='cuda:0', cfg_options=None) @staticmethod def connect(camera_stream): @@ -39,12 +37,25 @@ def destroy(self): @erdos.profile_method() def on_frame_msg(self, msg, obstacle_tracking_stream): """Invoked when a FrameMessage is received on the camera stream.""" + from qdtrack.apis import inference_model + self._logger.debug('@{}: {} received frame'.format( msg.timestamp, self.config.name)) assert msg.frame.encoding == 'BGR', 'Expects BGR frames' + image_np = msg.frame.as_bgr_numpy_array() - results = self.inference_model(self.model, image_np) #FIX + results = inference_model(self.model, image_np) + bbox_result, track_result = results.values() + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) obstacles = [] + # for i in range(len(labels)): + # bbox = bboxes[i] + for res in results: track_id = res['tracking_id'] bbox = res['bbox'] From 99984887ee089d8f004682e7c07671dea44b0e0e Mon Sep 17 00:00:00 2001 From: mageofboy Date: Fri, 23 Apr 2021 19:35:56 +0000 Subject: [PATCH 21/34] add lanenet --- scripts/lanenet_data_postprocess.py | 47 +++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 scripts/lanenet_data_postprocess.py diff --git a/scripts/lanenet_data_postprocess.py b/scripts/lanenet_data_postprocess.py new file mode 100644 index 000000000..44920eb9b --- /dev/null +++ b/scripts/lanenet_data_postprocess.py @@ -0,0 +1,47 @@ +from absl import app, flags +from pathlib import Path +import numpy as np +import os +from PIL import Image +import matplotlib.pyplot as plt +import math +from random import sample +FLAGS = flags.FLAGS +flags.DEFINE_string('data_path', 'data/', 'binary images directory') + +def main(argv): + data_dir = os.path.abspath(FLAGS.data_path) + res = [] + # ct = [] + for f in os.listdir(data_dir): + curr_path = os.path.join(data_dir, f) + image = Image.open(curr_path) + im = np.asarray(image) + num_nonzero = np.count_nonzero(im) + # ct.append(num_nonzero) + if num_nonzero < 3000: + res.append(curr_path) + # n = math.ceil((max(ct) - min(ct))/1000) + # plt.hist(ct, bins=n) + # plt.show() + print(len(res)) + res_sample = res + directories = ['gt_binary_image','gt_instance_image', 'gt_image'] + filtered_dir = os.path.join(data_dir[:-len('gt_binary_image')], 'filtered/') + Path(filtered_dir).mkdir(parents=True, exist_ok=True) + for d in 
directories: + res_dir = os.path.join(filtered_dir, d) + Path(res_dir).mkdir(parents=True, exist_ok=True) + # Path(output_dir).mkdir(parents=True, exist_ok=True) + for f in res_sample: + s = f.split('gt_binary_image') + for d in directories: + curr_dir = d.join(s) + res_dir = os.path.join(filtered_dir, d) + s[1] + # print(curr_dir, res_dir) + Path(curr_dir).rename(res_dir) + + + +if __name__ == "__main__": + app.run(main) \ No newline at end of file From c8d89e54a30582411d8fafb16158d46f08d341a5 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Fri, 16 Apr 2021 17:11:35 +0000 Subject: [PATCH 22/34] start --- install.sh | 9 +++ pylot/component_creator.py | 5 ++ pylot/flags.py | 2 +- pylot/operator_creator.py | 15 ++++ pylot/perception/flags.py | 6 ++ .../perception/tracking/qd_track_operator.py | 76 +++++++++++++++++++ requirements.txt | 2 + 7 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 pylot/perception/tracking/qd_track_operator.py diff --git a/install.sh b/install.sh index 2e20369e1..ebe72c976 100755 --- a/install.sh +++ b/install.sh @@ -141,6 +141,15 @@ sudo apt-get install llvm-9 export LLVM_CONFIG=/usr/bin/llvm-config-9 python3 setup.py build develop --user +###### Install CenterTrack ###### +cd $PYLOT_HOME/dependencies/ +git clone https://github.com/SysCV/qdtrack #FIX to use fork? +git clone https://github.com/open-mmlab/mmdetection.git +cd mmdetection +python3 setup.py develop +cd $PYLOT_HOME/dependencies/qdtrack +python3 setup.py develop + ##### Download the Lanenet code ##### echo "[x] Cloning the lanenet lane detection code..." cd $PYLOT_HOME/dependencies/ diff --git a/pylot/component_creator.py b/pylot/component_creator.py index 9612d7987..99e2bb4e8 100644 --- a/pylot/component_creator.py +++ b/pylot/component_creator.py @@ -351,6 +351,11 @@ def add_obstacle_tracking(center_camera_stream, obstacles_wo_history_tracking_stream = \ pylot.operator_creator.add_center_track_tracking( center_camera_stream, center_camera_setup) + elif FLAGS.tracker_type == 'qd_track': + logger.debug('Using QDTrack obstacle tracker...') + obstacles_wo_history_tracking_stream = \ + pylot.operator_creator.add_qd_track_tracking( + center_camera_stream, center_camera_setup) else: logger.debug('Using obstacle tracker...') obstacles_wo_history_tracking_stream = \ diff --git a/pylot/flags.py b/pylot/flags.py index 6b7f8a5ed..96a7357f1 100644 --- a/pylot/flags.py +++ b/pylot/flags.py @@ -40,7 +40,7 @@ flags.DEFINE_bool('perfect_obstacle_tracking', False, 'True to enable perfect obstacle tracking') flags.DEFINE_enum('tracker_type', 'sort', - ['da_siam_rpn', 'deep_sort', 'sort', 'center_track'], + ['da_siam_rpn', 'deep_sort', 'sort', 'center_track', 'qd_track'], 'Sets which obstacle tracker to use') flags.DEFINE_bool('lane_detection', False, 'True to enable lane detection') flags.DEFINE_bool('perfect_lane_detection', False, diff --git a/pylot/operator_creator.py b/pylot/operator_creator.py index 9b80bff27..fa7c9979b 100644 --- a/pylot/operator_creator.py +++ b/pylot/operator_creator.py @@ -269,6 +269,21 @@ def add_center_track_tracking(bgr_camera_stream, return obstacle_tracking_stream +def add_qd_track_tracking(bgr_camera_stream, + camera_setup, + name='qd_track'): + from pylot.perception.tracking.qd_track_operator import \ + QdTrackOperator + op_config = erdos.OperatorConfig(name='qd_track_operator', + log_file_name=FLAGS.log_file_name, + csv_log_file_name=FLAGS.csv_log_file_name, + profile_file_name=FLAGS.profile_file_name) + [obstacle_tracking_stream] = erdos.connect(QdTrackOperator, 
op_config, + [bgr_camera_stream], FLAGS, + camera_setup) + return obstacle_tracking_stream + + def add_tracking_evaluation(obstacle_tracking_stream, ground_obstacles_stream, evaluate_timely=False, diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index 4ed856922..93ad7215f 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -84,6 +84,12 @@ ['kitti_tracking', 'coco', 'mot', 'nuscenes'], 'CenterTrack available models') +# QDTrack tracking flags. +flags.DEFINE_string( + 'qd_track_model_path', + 'dependencies/models/tracking/CenterTrack/kitti_fulltrain.pth', #FIX + 'Path to the model') + # Lane detection flags. flags.DEFINE_float('lane_detection_gpu_memory_fraction', 0.3, 'GPU memory fraction allocated to Lanenet') diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py new file mode 100644 index 000000000..080690ac9 --- /dev/null +++ b/pylot/perception/tracking/qd_track_operator.py @@ -0,0 +1,76 @@ +import cv2 + +import erdos + +import numpy as np + +from pylot.perception.detection.obstacle import Obstacle +from pylot.perception.detection.utils import BoundingBox2D, \ + BoundingBox3D, OBSTACLE_LABELS +from pylot.perception.messages import ObstaclesMessage +from qdtrack.apis.inference import init_model, inference_model + +import torch + + +class QdTrackOperator(erdos.Operator): + def __init__(self, camera_stream, obstacle_tracking_stream, flags, + camera_setup): + camera_stream.add_callback(self.on_frame_msg, + [obstacle_tracking_stream]) + self._flags = flags + self._logger = erdos.utils.setup_logging(self.config.name, + self.config.log_file_name) + self._csv_logger = erdos.utils.setup_csv_logging( + self.config.name + '-csv', self.config.csv_log_file_name) + self._camera_setup = camera_setup + + self.model = init_model() #FIX + + + @staticmethod + def connect(camera_stream): + obstacle_tracking_stream = erdos.WriteStream() + return [obstacle_tracking_stream] + + def destroy(self): + self._logger.warn('destroying {}'.format(self.config.name)) + + @erdos.profile_method() + def on_frame_msg(self, msg, obstacle_tracking_stream): + """Invoked when a FrameMessage is received on the camera stream.""" + self._logger.debug('@{}: {} received frame'.format( + msg.timestamp, self.config.name)) + assert msg.frame.encoding == 'BGR', 'Expects BGR frames' + image_np = msg.frame.as_bgr_numpy_array() + results = self.inference_model(self.model, image_np) #FIX + obstacles = [] + for res in results: + track_id = res['tracking_id'] + bbox = res['bbox'] + score = res['score'] + (label_id, ) = res['class'] - 1, + if label_id > 80: + continue + label = self.trained_dataset.class_name[label_id] + if label in ['Pedestrian', 'pedestrian']: + label = 'person' + elif label == 'Car': + label = 'car' + elif label == 'Cyclist': + label == 'bicycle' + if label in OBSTACLE_LABELS: + bounding_box_2D = BoundingBox2D(bbox[0], bbox[2], bbox[1], + bbox[3]) + bounding_box_3D = None + if 'dim' in res and 'loc' in res and 'rot_y' in res: + bounding_box_3D = BoundingBox3D.from_dimensions( + res['dim'], res['loc'], res['rot_y']) + obstacles.append( + Obstacle(bounding_box_3D, + score, + label, + track_id, + bounding_box_2D=bounding_box_2D)) + obstacle_tracking_stream.send( + ObstaclesMessage(msg.timestamp, obstacles, 0)) diff --git a/requirements.txt b/requirements.txt index 6b3d1776d..4ca42c1a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,5 +27,7 @@ nuscenes-devkit progress pyquaternion scikit-learn==0.22.2 +mmcv>=0.3.0 +mmdet 
##### CARLA dependencies ##### networkx==2.2 From 3dcfbfb6e525feac31eae8d136b15fb34358bb05 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Fri, 23 Apr 2021 19:33:24 +0000 Subject: [PATCH 23/34] model working, need fix operator --- configs/tracking.conf | 4 ++-- install.sh | 11 ++++++--- pylot/perception/flags.py | 6 ++++- .../perception/tracking/qd_track_operator.py | 23 ++++++++++++++----- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/configs/tracking.conf b/configs/tracking.conf index 6fee38356..97ddd63e3 100644 --- a/configs/tracking.conf +++ b/configs/tracking.conf @@ -4,7 +4,7 @@ --obstacle_detection_gpu_memory_fraction=0.4 ######### Tracker config ######### --obstacle_tracking ---tracker_type=sort +--tracker_type=qd_track #--tracker_type=deep_sort #--tracker_type=da_siam_rpn ###### Control config ##### @@ -15,7 +15,7 @@ --simulator_num_people=200 --simulator_num_vehicles=10 ######### Other config ######### ---visualize_tracked_obstacles +# --visualize_tracked_obstacles --log_file_name=pylot.log --csv_log_file_name=pylot.csv --v=1 diff --git a/install.sh b/install.sh index ebe72c976..e9fbedb79 100755 --- a/install.sh +++ b/install.sh @@ -79,6 +79,11 @@ mkdir -p tracking/center_track ; cd tracking/center_track # COCO model ~/.local/bin/gdown https://drive.google.com/uc?id=1tJCEJmdtYIh8VuN8CClGNws3YO7QGd40 +###### Download QDTrack models ###### +cd $PYLOT_HOME/dependencies/models +mkdir -p tracking/qd_track ; cd tracking/qd_track +~/.local/bin/gdown https://drive.google.com/uc?id=1YNAQgd8rMqqEG-fRj3VWlO4G5kdwJbxz + ##### Download AnyNet depth estimation models ##### echo "[x] Downloading the depth estimation models..." cd $PYLOT_HOME/dependencies/models @@ -141,12 +146,12 @@ sudo apt-get install llvm-9 export LLVM_CONFIG=/usr/bin/llvm-config-9 python3 setup.py build develop --user -###### Install CenterTrack ###### +###### Install QDTrack ###### cd $PYLOT_HOME/dependencies/ -git clone https://github.com/SysCV/qdtrack #FIX to use fork? +git clone https://github.com/mageofboy/qdtrack.git git clone https://github.com/open-mmlab/mmdetection.git cd mmdetection -python3 setup.py develop +python3 setup.py develop #need to add mmcv cd $PYLOT_HOME/dependencies/qdtrack python3 setup.py develop diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index 93ad7215f..f0b5825bc 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -87,7 +87,11 @@ # QDTrack tracking flags. flags.DEFINE_string( 'qd_track_model_path', - 'dependencies/models/tracking/CenterTrack/kitti_fulltrain.pth', #FIX + 'dependencies/models/tracking/qd_track/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', #FIX + 'Path to the model') +flags.DEFINE_string( + 'qd_track_config_path', + 'dependencies/qdtrack/configs/qdtrack-frcnn_r50_fpn_12e_bdd100k.py', #FIX 'Path to the model') # Lane detection flags. 
diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index 080690ac9..bef027f7b 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -8,14 +8,14 @@ from pylot.perception.detection.utils import BoundingBox2D, \ BoundingBox3D, OBSTACLE_LABELS from pylot.perception.messages import ObstaclesMessage -from qdtrack.apis.inference import init_model, inference_model import torch - class QdTrackOperator(erdos.Operator): def __init__(self, camera_stream, obstacle_tracking_stream, flags, camera_setup): + from qdtrack.apis import init_model + camera_stream.add_callback(self.on_frame_msg, [obstacle_tracking_stream]) self._flags = flags @@ -24,9 +24,7 @@ def __init__(self, camera_stream, obstacle_tracking_stream, flags, self._csv_logger = erdos.utils.setup_csv_logging( self.config.name + '-csv', self.config.csv_log_file_name) self._camera_setup = camera_setup - - self.model = init_model() #FIX - + self.model = init_model(self._flags.qd_track_config_path, checkpoint=self._flags.qd_track_model_path, device='cuda:0', cfg_options=None) @staticmethod def connect(camera_stream): @@ -39,12 +37,25 @@ def destroy(self): @erdos.profile_method() def on_frame_msg(self, msg, obstacle_tracking_stream): """Invoked when a FrameMessage is received on the camera stream.""" + from qdtrack.apis import inference_model + self._logger.debug('@{}: {} received frame'.format( msg.timestamp, self.config.name)) assert msg.frame.encoding == 'BGR', 'Expects BGR frames' + image_np = msg.frame.as_bgr_numpy_array() - results = self.inference_model(self.model, image_np) #FIX + results = inference_model(self.model, image_np) + bbox_result, track_result = results.values() + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) obstacles = [] + # for i in range(len(labels)): + # bbox = bboxes[i] + for res in results: track_id = res['tracking_id'] bbox = res['bbox'] From c971388c1b7c4afb81c07eed853475e60ec9b2d5 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Fri, 23 Apr 2021 19:35:56 +0000 Subject: [PATCH 24/34] add lanenet --- scripts/lanenet_data_postprocess.py | 47 +++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 scripts/lanenet_data_postprocess.py diff --git a/scripts/lanenet_data_postprocess.py b/scripts/lanenet_data_postprocess.py new file mode 100644 index 000000000..44920eb9b --- /dev/null +++ b/scripts/lanenet_data_postprocess.py @@ -0,0 +1,47 @@ +from absl import app, flags +from pathlib import Path +import numpy as np +import os +from PIL import Image +import matplotlib.pyplot as plt +import math +from random import sample +FLAGS = flags.FLAGS +flags.DEFINE_string('data_path', 'data/', 'binary images directory') + +def main(argv): + data_dir = os.path.abspath(FLAGS.data_path) + res = [] + # ct = [] + for f in os.listdir(data_dir): + curr_path = os.path.join(data_dir, f) + image = Image.open(curr_path) + im = np.asarray(image) + num_nonzero = np.count_nonzero(im) + # ct.append(num_nonzero) + if num_nonzero < 3000: + res.append(curr_path) + # n = math.ceil((max(ct) - min(ct))/1000) + # plt.hist(ct, bins=n) + # plt.show() + print(len(res)) + res_sample = res + directories = ['gt_binary_image','gt_instance_image', 'gt_image'] + filtered_dir = os.path.join(data_dir[:-len('gt_binary_image')], 'filtered/') + Path(filtered_dir).mkdir(parents=True, exist_ok=True) + for d in 
directories: + res_dir = os.path.join(filtered_dir, d) + Path(res_dir).mkdir(parents=True, exist_ok=True) + # Path(output_dir).mkdir(parents=True, exist_ok=True) + for f in res_sample: + s = f.split('gt_binary_image') + for d in directories: + curr_dir = d.join(s) + res_dir = os.path.join(filtered_dir, d) + s[1] + # print(curr_dir, res_dir) + Path(curr_dir).rename(res_dir) + + + +if __name__ == "__main__": + app.run(main) \ No newline at end of file From cb69924bf16f54a4f6a01452a37eb959d1caef02 Mon Sep 17 00:00:00 2001 From: Eric Leong Date: Fri, 30 Apr 2021 02:55:08 +0000 Subject: [PATCH 25/34] working versison of qd track op --- .../perception/tracking/qd_track_operator.py | 47 +++++++++---------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index bef027f7b..1718ed742 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -25,6 +25,8 @@ def __init__(self, camera_stream, obstacle_tracking_stream, flags, self.config.name + '-csv', self.config.csv_log_file_name) self._camera_setup = camera_setup self.model = init_model(self._flags.qd_track_config_path, checkpoint=self._flags.qd_track_model_path, device='cuda:0', cfg_options=None) + self.classes = ('pedestrian', 'rider', 'car', 'bus', 'truck', 'bicycle', 'motorcycle', 'train') + self.frame_id = 0 @staticmethod def connect(camera_stream): @@ -44,41 +46,34 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): assert msg.frame.encoding == 'BGR', 'Expects BGR frames' image_np = msg.frame.as_bgr_numpy_array() - results = inference_model(self.model, image_np) + results = inference_model(self.model, image_np, self.frame_id) bbox_result, track_result = results.values() - bboxes = np.vstack(bbox_result) - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) + track_bboxes = np.zeros((0, 5)) + track_ids = np.zeros((0)) + track_labels = np.zeros((0)) + obstacle_count = 0 + for k, v in track_result.items(): + track_bboxes = np.concatenate((track_bboxes, v['bbox'][None, :]), axis=0) + track_ids = np.concatenate((track_ids, np.array([k])), axis=0) + track_labels = np.concatenate((track_labels, np.array([v['label']]))) + obstacle_count += 1 obstacles = [] - # for i in range(len(labels)): - # bbox = bboxes[i] + for i in range(obstacle_count): + track_id = track_ids[i] + bbox = track_bboxes[i, :] + score = bbox[4] + label_id = track_labels[i] + label = self.classes[int(label_id)] - for res in results: - track_id = res['tracking_id'] - bbox = res['bbox'] - score = res['score'] - (label_id, ) = res['class'] - 1, - if label_id > 80: - continue - label = self.trained_dataset.class_name[label_id] - if label in ['Pedestrian', 'pedestrian']: + if label in ['pedestrian', 'rider']: label = 'person' - elif label == 'Car': - label = 'car' - elif label == 'Cyclist': - label == 'bicycle' + if label in OBSTACLE_LABELS: bounding_box_2D = BoundingBox2D(bbox[0], bbox[2], bbox[1], bbox[3]) bounding_box_3D = None - if 'dim' in res and 'loc' in res and 'rot_y' in res: - bounding_box_3D = BoundingBox3D.from_dimensions( - res['dim'], res['loc'], res['rot_y']) obstacles.append( - Obstacle(bounding_box_3D, + Obstacle(bounding_box_2D, score, label, track_id, From fcde4aa3c4ff80330f69caf26762979e70b1d108 Mon Sep 17 00:00:00 2001 From: Eric Leong Date: Sun, 2 May 2021 21:51:19 +0000 Subject: [PATCH 26/34] update frame_id, add 
timing, id to int --- pylot/perception/tracking/qd_track_operator.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index 1718ed742..bbaa09508 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -1,4 +1,5 @@ import cv2 +import time import erdos @@ -44,13 +45,15 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): self._logger.debug('@{}: {} received frame'.format( msg.timestamp, self.config.name)) assert msg.frame.encoding == 'BGR', 'Expects BGR frames' - + start_time = time.time() image_np = msg.frame.as_bgr_numpy_array() results = inference_model(self.model, image_np, self.frame_id) + self.frame_id += 1 + bbox_result, track_result = results.values() track_bboxes = np.zeros((0, 5)) - track_ids = np.zeros((0)) - track_labels = np.zeros((0)) + track_ids = np.zeros((0), dtype=int) + track_labels = np.zeros((0), dtype=int) obstacle_count = 0 for k, v in track_result.items(): track_bboxes = np.concatenate((track_bboxes, v['bbox'][None, :]), axis=0) @@ -63,7 +66,7 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): bbox = track_bboxes[i, :] score = bbox[4] label_id = track_labels[i] - label = self.classes[int(label_id)] + label = self.classes[label_id] if label in ['pedestrian', 'rider']: label = 'person' @@ -78,5 +81,6 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): label, track_id, bounding_box_2D=bounding_box_2D)) + runtime = (time.time() - start_time) * 1000 obstacle_tracking_stream.send( - ObstaclesMessage(msg.timestamp, obstacles, 0)) + ObstaclesMessage(msg.timestamp, obstacles, runtime)) From 6418774277c652308d2be246b401a007b1b0bcbe Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 17 Jul 2021 03:00:02 +0000 Subject: [PATCH 27/34] revert conf --- configs/tracking.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/tracking.conf b/configs/tracking.conf index 97ddd63e3..6fee38356 100644 --- a/configs/tracking.conf +++ b/configs/tracking.conf @@ -4,7 +4,7 @@ --obstacle_detection_gpu_memory_fraction=0.4 ######### Tracker config ######### --obstacle_tracking ---tracker_type=qd_track +--tracker_type=sort #--tracker_type=deep_sort #--tracker_type=da_siam_rpn ###### Control config ##### @@ -15,7 +15,7 @@ --simulator_num_people=200 --simulator_num_vehicles=10 ######### Other config ######### -# --visualize_tracked_obstacles +--visualize_tracked_obstacles --log_file_name=pylot.log --csv_log_file_name=pylot.csv --v=1 From 66b361cf79afca181cc03fe5bf6405e2274d489c Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 17 Jul 2021 03:49:39 +0000 Subject: [PATCH 28/34] revise alg --- configs/tracking.conf | 4 ++-- pylot/perception/tracking/qd_track_operator.py | 18 ++++-------------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/configs/tracking.conf b/configs/tracking.conf index 6fee38356..1c69898cd 100644 --- a/configs/tracking.conf +++ b/configs/tracking.conf @@ -4,7 +4,7 @@ --obstacle_detection_gpu_memory_fraction=0.4 ######### Tracker config ######### --obstacle_tracking ---tracker_type=sort +--tracker_type=qd_track #--tracker_type=deep_sort #--tracker_type=da_siam_rpn ###### Control config ##### @@ -15,7 +15,7 @@ --simulator_num_people=200 --simulator_num_vehicles=10 ######### Other config ######### ---visualize_tracked_obstacles +##--visualize_tracked_obstacles --log_file_name=pylot.log --csv_log_file_name=pylot.csv --v=1 diff --git 
a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index bbaa09508..1c2b0bcb8 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -51,23 +51,13 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): self.frame_id += 1 bbox_result, track_result = results.values() - track_bboxes = np.zeros((0, 5)) - track_ids = np.zeros((0), dtype=int) - track_labels = np.zeros((0), dtype=int) - obstacle_count = 0 - for k, v in track_result.items(): - track_bboxes = np.concatenate((track_bboxes, v['bbox'][None, :]), axis=0) - track_ids = np.concatenate((track_ids, np.array([k])), axis=0) - track_labels = np.concatenate((track_labels, np.array([v['label']]))) - obstacle_count += 1 obstacles = [] - for i in range(obstacle_count): - track_id = track_ids[i] - bbox = track_bboxes[i, :] + for k, v in track_result.items(): + track_id = k + bbox = v['bbox'][None, :] score = bbox[4] - label_id = track_labels[i] + label_id = v['label'] label = self.classes[label_id] - if label in ['pedestrian', 'rider']: label = 'person' From 16b3e46b0651c1ecb844e89a054224b530350a63 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 17 Jul 2021 04:21:13 +0000 Subject: [PATCH 29/34] lint --- pylot/flags.py | 7 ++++--- pylot/operator_creator.py | 4 +--- pylot/perception/flags.py | 4 ++-- pylot/perception/tracking/qd_track_operator.py | 10 +++++++--- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/pylot/flags.py b/pylot/flags.py index 96a7357f1..54ff9f7d7 100644 --- a/pylot/flags.py +++ b/pylot/flags.py @@ -39,9 +39,10 @@ 'True to enable obstacle tracking operator') flags.DEFINE_bool('perfect_obstacle_tracking', False, 'True to enable perfect obstacle tracking') -flags.DEFINE_enum('tracker_type', 'sort', - ['da_siam_rpn', 'deep_sort', 'sort', 'center_track', 'qd_track'], - 'Sets which obstacle tracker to use') +flags.DEFINE_enum( + 'tracker_type', 'sort', + ['da_siam_rpn', 'deep_sort', 'sort', 'center_track', 'qd_track'], + 'Sets which obstacle tracker to use') flags.DEFINE_bool('lane_detection', False, 'True to enable lane detection') flags.DEFINE_bool('perfect_lane_detection', False, 'True to enable perfect lane detection') diff --git a/pylot/operator_creator.py b/pylot/operator_creator.py index fa7c9979b..b32b70420 100644 --- a/pylot/operator_creator.py +++ b/pylot/operator_creator.py @@ -269,9 +269,7 @@ def add_center_track_tracking(bgr_camera_stream, return obstacle_tracking_stream -def add_qd_track_tracking(bgr_camera_stream, - camera_setup, - name='qd_track'): +def add_qd_track_tracking(bgr_camera_stream, camera_setup, name='qd_track'): from pylot.perception.tracking.qd_track_operator import \ QdTrackOperator op_config = erdos.OperatorConfig(name='qd_track_operator', diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index f0b5825bc..4fcf22968 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -87,11 +87,11 @@ # QDTrack tracking flags. flags.DEFINE_string( 'qd_track_model_path', - 'dependencies/models/tracking/qd_track/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', #FIX + 'dependencies/models/tracking/qd_track/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', 'Path to the model') flags.DEFINE_string( 'qd_track_config_path', - 'dependencies/qdtrack/configs/qdtrack-frcnn_r50_fpn_12e_bdd100k.py', #FIX + 'dependencies/qdtrack/configs/qdtrack-frcnn_r50_fpn_12e_bdd100k.py', 'Path to the model') # Lane detection flags. 
diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index 1c2b0bcb8..f815ecb64 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -12,6 +12,7 @@ import torch + class QdTrackOperator(erdos.Operator): def __init__(self, camera_stream, obstacle_tracking_stream, flags, camera_setup): @@ -25,8 +26,12 @@ def __init__(self, camera_stream, obstacle_tracking_stream, flags, self._csv_logger = erdos.utils.setup_csv_logging( self.config.name + '-csv', self.config.csv_log_file_name) self._camera_setup = camera_setup - self.model = init_model(self._flags.qd_track_config_path, checkpoint=self._flags.qd_track_model_path, device='cuda:0', cfg_options=None) - self.classes = ('pedestrian', 'rider', 'car', 'bus', 'truck', 'bicycle', 'motorcycle', 'train') + self.model = init_model(self._flags.qd_track_config_path, + checkpoint=self._flags.qd_track_model_path, + device='cuda:0', + cfg_options=None) + self.classes = ('pedestrian', 'rider', 'car', 'bus', 'truck', + 'bicycle', 'motorcycle', 'train') self.frame_id = 0 @staticmethod @@ -60,7 +65,6 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): label = self.classes[label_id] if label in ['pedestrian', 'rider']: label = 'person' - if label in OBSTACLE_LABELS: bounding_box_2D = BoundingBox2D(bbox[0], bbox[2], bbox[1], bbox[3]) From a6ba4f51c1f390d9a01fb10f3913a383e3031f16 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 17 Jul 2021 04:29:02 +0000 Subject: [PATCH 30/34] lint --- pylot/perception/detection/lane.py | 1 - pylot/perception/flags.py | 3 ++- pylot/perception/tracking/qd_track_operator.py | 8 +------- pylot/simulation/perfect_lane_detector_operator.py | 2 -- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/pylot/perception/detection/lane.py b/pylot/perception/detection/lane.py index e555a6387..9f3cd4e66 100644 --- a/pylot/perception/detection/lane.py +++ b/pylot/perception/detection/lane.py @@ -5,7 +5,6 @@ import numpy as np from pylot.utils import Location, Rotation, Transform, Vector3D -import cv2 from shapely.geometry import Point from shapely.geometry.polygon import Polygon diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index 4fcf22968..41fc68100 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -87,7 +87,8 @@ # QDTrack tracking flags. 
flags.DEFINE_string( 'qd_track_model_path', - 'dependencies/models/tracking/qd_track/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', + 'dependencies/models/tracking/qd_track/' + + 'qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', 'Path to the model') flags.DEFINE_string( 'qd_track_config_path', diff --git a/pylot/perception/tracking/qd_track_operator.py b/pylot/perception/tracking/qd_track_operator.py index f815ecb64..2439c78d0 100644 --- a/pylot/perception/tracking/qd_track_operator.py +++ b/pylot/perception/tracking/qd_track_operator.py @@ -1,17 +1,12 @@ -import cv2 import time import erdos -import numpy as np - from pylot.perception.detection.obstacle import Obstacle from pylot.perception.detection.utils import BoundingBox2D, \ - BoundingBox3D, OBSTACLE_LABELS + OBSTACLE_LABELS from pylot.perception.messages import ObstaclesMessage -import torch - class QdTrackOperator(erdos.Operator): def __init__(self, camera_stream, obstacle_tracking_stream, flags, @@ -68,7 +63,6 @@ def on_frame_msg(self, msg, obstacle_tracking_stream): if label in OBSTACLE_LABELS: bounding_box_2D = BoundingBox2D(bbox[0], bbox[2], bbox[1], bbox[3]) - bounding_box_3D = None obstacles.append( Obstacle(bounding_box_2D, score, diff --git a/pylot/simulation/perfect_lane_detector_operator.py b/pylot/simulation/perfect_lane_detector_operator.py index a94a64d13..e4b12103b 100644 --- a/pylot/simulation/perfect_lane_detector_operator.py +++ b/pylot/simulation/perfect_lane_detector_operator.py @@ -4,8 +4,6 @@ import PIL.Image as Image import erdos -import PIL.Image as Image -import os from erdos import Message, ReadStream, Timestamp, WriteStream import numpy as np From aaf900210c6911c3bbbee9e655b39933f09d2e1b Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sat, 17 Jul 2021 04:41:14 +0000 Subject: [PATCH 31/34] lint --- pylot/perception/flags.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pylot/perception/flags.py b/pylot/perception/flags.py index 41fc68100..a72b94a8b 100644 --- a/pylot/perception/flags.py +++ b/pylot/perception/flags.py @@ -86,10 +86,8 @@ # QDTrack tracking flags. 
flags.DEFINE_string( - 'qd_track_model_path', - 'dependencies/models/tracking/qd_track/' + - 'qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', - 'Path to the model') + 'qd_track_model_path', 'dependencies/models/tracking/qd_track/' + + 'qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth', 'Path to the model') flags.DEFINE_string( 'qd_track_config_path', 'dependencies/qdtrack/configs/qdtrack-frcnn_r50_fpn_12e_bdd100k.py', From b4c756a9b3294e89235a38b7c31c4912a0d066e2 Mon Sep 17 00:00:00 2001 From: Eric Leong Date: Sun, 18 Jul 2021 01:25:07 -0700 Subject: [PATCH 32/34] Update tracking.conf --- configs/tracking.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/tracking.conf b/configs/tracking.conf index 1c69898cd..6fee38356 100644 --- a/configs/tracking.conf +++ b/configs/tracking.conf @@ -4,7 +4,7 @@ --obstacle_detection_gpu_memory_fraction=0.4 ######### Tracker config ######### --obstacle_tracking ---tracker_type=qd_track +--tracker_type=sort #--tracker_type=deep_sort #--tracker_type=da_siam_rpn ###### Control config ##### @@ -15,7 +15,7 @@ --simulator_num_people=200 --simulator_num_vehicles=10 ######### Other config ######### -##--visualize_tracked_obstacles +--visualize_tracked_obstacles --log_file_name=pylot.log --csv_log_file_name=pylot.csv --v=1 From 2c9bcbdda33b3cacdc55a9e50cc7f7594b0abae5 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sun, 18 Jul 2021 08:28:50 +0000 Subject: [PATCH 33/34] remove lanenet frm pr --- scripts/lanenet_data_process.py | 59 --------------------------------- 1 file changed, 59 deletions(-) delete mode 100644 scripts/lanenet_data_process.py diff --git a/scripts/lanenet_data_process.py b/scripts/lanenet_data_process.py deleted file mode 100644 index 20d35bfeb..000000000 --- a/scripts/lanenet_data_process.py +++ /dev/null @@ -1,59 +0,0 @@ -from absl import app, flags -import os -from pathlib import Path -import re -FLAGS = flags.FLAGS - -flags.DEFINE_string('data_path', 'data/', 'Main path where data is logged, not including town and start') -flags.DEFINE_string('output_path', 'data/lanenet/', 'Path where to place lanenet structured data') -flags.DEFINE_string('town_start', '', 'Town and start where images were collected from') -def main(argv): - data_path = os.path.join(FLAGS.data_path, FLAGS.town_start) - data_dir = os.path.abspath(data_path) - output_dir = os.path.abspath(FLAGS.output_path) - binary_img_dir = os.path.join(output_dir, "gt_binary_image") - instance_img_dir = os.path.join(output_dir, "gt_instance_image") - img_dir = os.path.join(output_dir, "gt_image") - Path(output_dir).mkdir(parents=True, exist_ok=True) - Path(binary_img_dir).mkdir(parents=True, exist_ok=True) - Path(instance_img_dir).mkdir(parents=True, exist_ok=True) - Path(img_dir).mkdir(parents=True, exist_ok=True) - group_files = {} - rename = {} - i, j, k = 0, 0, 0 - for f in os.listdir(data_dir): - curr_path = os.path.join(data_dir, f) - m = re.search(r"-(\d+)\.", f) - timestamp = m.group(1) - new_f = FLAGS.town_start + "_" + timestamp + ".png" if FLAGS.town_start else timestamp +".png" - if re.search(r"binary_lane-.+\.png", f): - new_fname = os.path.join(binary_img_dir, new_f) - i+=1 - elif re.search(r"lane-.+\.png", f): - new_fname = os.path.join(instance_img_dir, new_f) - j+=1 - elif re.search(r"center-.+\.png", f): - new_fname = os.path.join(img_dir, new_f) - k+=1 - rename[new_fname] = curr_path - if timestamp in group_files: - group_files[timestamp].append(new_fname) - else: - group_files[timestamp] = [new_fname] - print(f"{i} binary {j} lane {k} 
center") - with open(os.path.join(output_dir,"im_names.txt"), "a") as f: - i = 0 - for v in group_files.values(): - if len(v) == 3: - v_sort = sorted(v) #binary, center, lane - v_join = [v_sort[1], v_sort[0], v_sort[2]] - f.write(" ".join(v_join) +'\n') - for new_fname in v: - curr_path = rename[new_fname] - Path(curr_path).rename(new_fname) - i+=1 - print(f"{i} data points ") - - -if __name__ == "__main__": - app.run(main) \ No newline at end of file From 2b76f4161ccb052b6e431689ecd9d7f5754554e1 Mon Sep 17 00:00:00 2001 From: mageofboy Date: Sun, 18 Jul 2021 08:30:22 +0000 Subject: [PATCH 34/34] rm lanenet postprocess, readd process --- scripts/lanenet_data_postprocess.py | 47 ----------------------- scripts/lanenet_data_process.py | 59 +++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 47 deletions(-) delete mode 100644 scripts/lanenet_data_postprocess.py create mode 100644 scripts/lanenet_data_process.py diff --git a/scripts/lanenet_data_postprocess.py b/scripts/lanenet_data_postprocess.py deleted file mode 100644 index 44920eb9b..000000000 --- a/scripts/lanenet_data_postprocess.py +++ /dev/null @@ -1,47 +0,0 @@ -from absl import app, flags -from pathlib import Path -import numpy as np -import os -from PIL import Image -import matplotlib.pyplot as plt -import math -from random import sample -FLAGS = flags.FLAGS -flags.DEFINE_string('data_path', 'data/', 'binary images directory') - -def main(argv): - data_dir = os.path.abspath(FLAGS.data_path) - res = [] - # ct = [] - for f in os.listdir(data_dir): - curr_path = os.path.join(data_dir, f) - image = Image.open(curr_path) - im = np.asarray(image) - num_nonzero = np.count_nonzero(im) - # ct.append(num_nonzero) - if num_nonzero < 3000: - res.append(curr_path) - # n = math.ceil((max(ct) - min(ct))/1000) - # plt.hist(ct, bins=n) - # plt.show() - print(len(res)) - res_sample = res - directories = ['gt_binary_image','gt_instance_image', 'gt_image'] - filtered_dir = os.path.join(data_dir[:-len('gt_binary_image')], 'filtered/') - Path(filtered_dir).mkdir(parents=True, exist_ok=True) - for d in directories: - res_dir = os.path.join(filtered_dir, d) - Path(res_dir).mkdir(parents=True, exist_ok=True) - # Path(output_dir).mkdir(parents=True, exist_ok=True) - for f in res_sample: - s = f.split('gt_binary_image') - for d in directories: - curr_dir = d.join(s) - res_dir = os.path.join(filtered_dir, d) + s[1] - # print(curr_dir, res_dir) - Path(curr_dir).rename(res_dir) - - - -if __name__ == "__main__": - app.run(main) \ No newline at end of file diff --git a/scripts/lanenet_data_process.py b/scripts/lanenet_data_process.py new file mode 100644 index 000000000..20d35bfeb --- /dev/null +++ b/scripts/lanenet_data_process.py @@ -0,0 +1,59 @@ +from absl import app, flags +import os +from pathlib import Path +import re +FLAGS = flags.FLAGS + +flags.DEFINE_string('data_path', 'data/', 'Main path where data is logged, not including town and start') +flags.DEFINE_string('output_path', 'data/lanenet/', 'Path where to place lanenet structured data') +flags.DEFINE_string('town_start', '', 'Town and start where images were collected from') +def main(argv): + data_path = os.path.join(FLAGS.data_path, FLAGS.town_start) + data_dir = os.path.abspath(data_path) + output_dir = os.path.abspath(FLAGS.output_path) + binary_img_dir = os.path.join(output_dir, "gt_binary_image") + instance_img_dir = os.path.join(output_dir, "gt_instance_image") + img_dir = os.path.join(output_dir, "gt_image") + Path(output_dir).mkdir(parents=True, exist_ok=True) + 
Path(binary_img_dir).mkdir(parents=True, exist_ok=True) + Path(instance_img_dir).mkdir(parents=True, exist_ok=True) + Path(img_dir).mkdir(parents=True, exist_ok=True) + group_files = {} + rename = {} + i, j, k = 0, 0, 0 + for f in os.listdir(data_dir): + curr_path = os.path.join(data_dir, f) + m = re.search(r"-(\d+)\.", f) + timestamp = m.group(1) + new_f = FLAGS.town_start + "_" + timestamp + ".png" if FLAGS.town_start else timestamp +".png" + if re.search(r"binary_lane-.+\.png", f): + new_fname = os.path.join(binary_img_dir, new_f) + i+=1 + elif re.search(r"lane-.+\.png", f): + new_fname = os.path.join(instance_img_dir, new_f) + j+=1 + elif re.search(r"center-.+\.png", f): + new_fname = os.path.join(img_dir, new_f) + k+=1 + rename[new_fname] = curr_path + if timestamp in group_files: + group_files[timestamp].append(new_fname) + else: + group_files[timestamp] = [new_fname] + print(f"{i} binary {j} lane {k} center") + with open(os.path.join(output_dir,"im_names.txt"), "a") as f: + i = 0 + for v in group_files.values(): + if len(v) == 3: + v_sort = sorted(v) #binary, center, lane + v_join = [v_sort[1], v_sort[0], v_sort[2]] + f.write(" ".join(v_join) +'\n') + for new_fname in v: + curr_path = rename[new_fname] + Path(curr_path).rename(new_fname) + i+=1 + print(f"{i} data points ") + + +if __name__ == "__main__": + app.run(main) \ No newline at end of file
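
The re-added lanenet_data_process.py is driven entirely by its absl flags (--data_path, --output_path, --town_start). Below is a minimal sketch of how the script can be invoked on a logged dataset; the town/start directory name is a made-up example and has to match whatever folder the data logger actually produced.

    python3 scripts/lanenet_data_process.py \
        --data_path=data/ \
        --output_path=data/lanenet/ \
        --town_start=town01_start1

The script groups the binary_lane-, lane-, and center- images by timestamp, moves complete triples into gt_binary_image/, gt_instance_image/, and gt_image/ under the output path, and appends the grouped paths to im_names.txt.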
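
Because QdTrackOperator defers its qdtrack imports to construction time, a broken mmdetection/mmcv build only surfaces once the pipeline starts. A quick sanity check of the dependencies cloned by install.sh, assuming they were built with python3 setup.py develop as above:

    # Should print the two entry points the operator uses; an ImportError here
    # points at the mmcv/mmdet build rather than at the operator itself.
    python3 -c "from qdtrack.apis import init_model, inference_model; print(init_model, inference_model)"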
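
To exercise the new tracker end to end, note that the series ultimately leaves configs/tracking.conf on --tracker_type=sort, so qd_track has to be selected explicitly. The command below is a sketch, assuming pylot.py is the project's usual entry point, the simulator is already running, the checkpoint was downloaded to the path the new flags default to, and command-line overrides after --flagfile take precedence (otherwise flip the tracker_type line in the conf instead).

    # The two path flags below just restate the new flag defaults for clarity.
    python3 pylot.py --flagfile=configs/tracking.conf \
        --tracker_type=qd_track \
        --qd_track_model_path=dependencies/models/tracking/qd_track/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth \
        --qd_track_config_path=dependencies/qdtrack/configs/qdtrack-frcnn_r50_fpn_12e_bdd100k.py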